/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_legacy.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/console.h>
#include <linux/vt.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/oom.h>

static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
        case I915_PARAM_ALLOW_BATCHBUFFER:
        case I915_PARAM_LAST_DISPATCH:
                /* Reject all old ums/dri params. */
                return -ENODEV;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pdev->device;
                break;
        case I915_PARAM_REVISION:
                value = dev->pdev->revision;
                break;
        case I915_PARAM_HAS_GEM:
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                /* depends on GEM */
                value = 1;
                break;
        case I915_PARAM_HAS_BSD:
                value = intel_ring_initialized(&dev_priv->ring[VCS]);
                break;
        case I915_PARAM_HAS_BLT:
                value = intel_ring_initialized(&dev_priv->ring[BCS]);
                break;
        case I915_PARAM_HAS_VEBOX:
                value = intel_ring_initialized(&dev_priv->ring[VECS]);
                break;
        case I915_PARAM_HAS_BSD2:
                value = intel_ring_initialized(&dev_priv->ring[VCS2]);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
                break;
        case I915_PARAM_HAS_COHERENT_RINGS:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                value = INTEL_INFO(dev)->gen >= 4;
                break;
        case I915_PARAM_HAS_RELAXED_DELTA:
                value = 1;
                break;
        case I915_PARAM_HAS_GEN7_SOL_RESET:
                value = 1;
                break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev);
                break;
        case I915_PARAM_HAS_WT:
                value = HAS_WT(dev);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
                value = USES_PPGTT(dev);
                break;
        case I915_PARAM_HAS_WAIT_TIMEOUT:
                value = 1;
                break;
        case I915_PARAM_HAS_SEMAPHORES:
                value = i915_semaphore_is_enabled(dev);
                break;
        case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
                value = 1;
                break;
        case I915_PARAM_HAS_SECURE_BATCHES:
                value = capable(CAP_SYS_ADMIN);
                break;
        case I915_PARAM_HAS_PINNED_BATCHES:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_NO_RELOC:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_HANDLE_LUT:
                value = 1;
                break;
        case I915_PARAM_CMD_PARSER_VERSION:
                value = i915_cmd_parser_get_version();
                break;
        case I915_PARAM_HAS_COHERENT_PHYS_GTT:
                value = 1;
                break;
        case I915_PARAM_MMAP_VERSION:
                value = 1;
                break;
        case I915_PARAM_SUBSLICE_TOTAL:
                value = INTEL_INFO(dev)->subslice_total;
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_EU_TOTAL:
                value = INTEL_INFO(dev)->eu_total;
                if (!value)
                        return -ENODEV;
                break;
        case I915_PARAM_HAS_GPU_RESET:
                value = i915.enable_hangcheck &&
                        intel_has_gpu_reset(dev);
                break;
        case I915_PARAM_HAS_RESOURCE_STREAMER:
                value = HAS_RESOURCE_STREAMER(dev);
                break;
        case I915_PARAM_HAS_EXEC_SOFTPIN:
                value = 1;
                break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
                return -EINVAL;
        }

        if (copy_to_user(param->value, &value, sizeof(int))) {
                DRM_ERROR("copy_to_user failed\n");
                return -EFAULT;
        }

        return 0;
}
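
/*
 * Illustrative userspace usage (not part of this file): a client queries
 * one of the parameters above through DRM_IOCTL_I915_GETPARAM, e.g. via
 * libdrm's drmIoctl():
 *
 *      int value = 0;
 *      drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID,
 *                                 .value = &value };
 *      drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *
 * An -EINVAL return simply means the running kernel predates the
 * parameter being asked about.
 */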

static int i915_get_bridge_dev(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
        if (!dev_priv->bridge_dev) {
                DRM_ERROR("bridge device not found\n");
                return -1;
        }
        return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp_lo, temp_hi = 0;
        u64 mchbar_addr;
        int ret;

        if (INTEL_INFO(dev)->gen >= 4)
                pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
        pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
        if (mchbar_addr &&
            pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
                return 0;
#endif

        /* Get some space for it */
        dev_priv->mch_res.name = "i915 MCHBAR";
        dev_priv->mch_res.flags = IORESOURCE_MEM;
        ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
                                     &dev_priv->mch_res,
                                     MCHBAR_SIZE, MCHBAR_SIZE,
                                     PCIBIOS_MIN_MEM,
                                     0, pcibios_align_resource,
                                     dev_priv->bridge_dev);
        if (ret) {
                DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
                dev_priv->mch_res.start = 0;
                return ret;
        }

        if (INTEL_INFO(dev)->gen >= 4)
                pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
                                       upper_32_bits(dev_priv->mch_res.start));

        pci_write_config_dword(dev_priv->bridge_dev, reg,
                               lower_32_bits(dev_priv->mch_res.start));
        return 0;
}

/* Set up MCHBAR if possible; note in dev_priv whether we need to disable it again on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;

        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                return;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled)
                return;

        if (intel_alloc_mchbar_resource(dev))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
        }
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;

        if (dev_priv->mchbar_need_disable) {
                if (IS_I915G(dev) || IS_I915GM(dev)) {
                        pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                        temp &= ~DEVEN_MCHBAR_EN;
                        pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
                } else {
                        pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                        temp &= ~1;
                        pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
                }
        }

        if (dev_priv->mch_res.start)
                release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
        struct drm_device *dev = cookie;

        intel_modeset_vga_set_state(dev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

        if (state == VGA_SWITCHEROO_ON) {
                pr_info("switched on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(dev->pdev, PCI_D0);
                i915_resume_switcheroo(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_info("switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                i915_suspend_switcheroo(dev, pmm);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        /*
         * FIXME: open_count is protected by drm_global_mutex but that would lead to
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
        return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
        .set_gpu_state = i915_switcheroo_set_state,
        .reprobe = NULL,
        .can_switch = i915_switcheroo_can_switch,
};

static int i915_load_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = intel_bios_init(dev_priv);
        if (ret)
                DRM_INFO("failed to find VBIOS tables\n");

        /* If we have > 1 VGA cards, then we need to arbitrate access
         * to the common VGA resources.
         *
         * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
         * then we do not take part in VGA arbitration and the
         * vga_client_register() fails with -ENODEV.
         */
        ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
        if (ret && ret != -ENODEV)
                goto out;

        intel_register_dsm_handler();

        ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
        if (ret)
                goto cleanup_vga_client;

        intel_power_domains_init_hw(dev_priv, false);

        intel_csr_ucode_init(dev_priv);

        ret = intel_irq_install(dev_priv);
        if (ret)
                goto cleanup_csr;

        intel_setup_gmbus(dev);

        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);

        intel_guc_ucode_init(dev);

        ret = i915_gem_init(dev);
        if (ret)
                goto cleanup_irq;

        intel_modeset_gem_init(dev);

        /* Always safe in the mode setting case. */
        /* FIXME: do pre/post-mode set stuff in core KMS code */
        dev->vblank_disable_allowed = true;
        if (INTEL_INFO(dev)->num_pipes == 0)
                return 0;

        ret = intel_fbdev_init(dev);
        if (ret)
                goto cleanup_gem;

        /* Only enable hotplug handling once the fbdev is fully set up. */
        intel_hpd_init(dev_priv);

        /*
         * Some ports require correctly set-up hpd registers for detection to
         * work properly (leading to ghost connected connector status), e.g. VGA
         * on gm45.  Hence we can only set up the initial fbdev config after hpd
         * irqs are fully enabled. Now we should scan for the initial config
         * only once hotplug handling is enabled, but due to screwed-up locking
         * around kms/fbdev init we can't protect the fbdev initial config
         * scanning against hotplug events. Hence do this first and ignore the
         * tiny window where we will lose hotplug notifications.
         */
        intel_fbdev_initial_config_async(dev);

        drm_kms_helper_poll_init(dev);

        return 0;

cleanup_gem:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
cleanup_irq:
        intel_guc_ucode_fini(dev);
        drm_irq_uninstall(dev);
        intel_teardown_gmbus(dev);
cleanup_csr:
        intel_csr_ucode_fini(dev_priv);
        vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
        vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
        return ret;
}

#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
        struct apertures_struct *ap;
        struct pci_dev *pdev = dev_priv->dev->pdev;
        bool primary;
        int ret;

        ap = alloc_apertures(1);
        if (!ap)
                return -ENOMEM;

        ap->ranges[0].base = dev_priv->gtt.mappable_base;
        ap->ranges[0].size = dev_priv->gtt.mappable_end;

        primary =
                pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

        ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

        kfree(ap);

        return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
        return 0;
}
#endif

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
        return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
        return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
        int ret = 0;

        DRM_INFO("Replacing VGA console driver\n");

        console_lock();
        if (con_is_bound(&vga_con))
                ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
        if (ret == 0) {
                ret = do_unregister_con_driver(&vga_con);

                /* Ignore "already unregistered". */
                if (ret == -ENODEV)
                        ret = 0;
        }
        console_unlock();

        return ret;
}
#endif

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
        const struct intel_device_info *info = &dev_priv->info;

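/*
 * DEV_INFO_FOR_EACH_FLAG() is an X-macro: expanded with PRINT_S it emits
 * one "%s" per feature flag to build the format string below, and expanded
 * with PRINT_FLAG it emits the matching argument list. Illustrative,
 * partial expansion (assuming is_mobile is one of the flags):
 *
 *      "%s" "%s" ..., info->is_mobile ? "is_mobile," : "", ...
 */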
#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
        DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
                         DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
                         info->gen,
                         dev_priv->dev->pdev->device,
                         dev_priv->dev->pdev->revision,
                         DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}

static void cherryview_sseu_info_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_device_info *info;
        u32 fuse, eu_dis;

        info = (struct intel_device_info *)&dev_priv->info;
        fuse = I915_READ(CHV_FUSE_GT);

        info->slice_total = 1;

        if (!(fuse & CHV_FGT_DISABLE_SS0)) {
                info->subslice_per_slice++;
                eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
                                 CHV_FGT_EU_DIS_SS0_R1_MASK);
                info->eu_total += 8 - hweight32(eu_dis);
        }

        if (!(fuse & CHV_FGT_DISABLE_SS1)) {
                info->subslice_per_slice++;
                eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
                                 CHV_FGT_EU_DIS_SS1_R1_MASK);
                info->eu_total += 8 - hweight32(eu_dis);
        }
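        /*
         * Worked example (hypothetical fuse value): if SS1 is enabled and
         * two of its EU-disable bits are set, the block above adds
         * 8 - hweight32(eu_dis) = 6 EUs to eu_total for that subslice.
         */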

        info->subslice_total = info->subslice_per_slice;
        /*
         * CHV expected to always have a uniform distribution of EU
         * across subslices.
         */
        info->eu_per_subslice = info->subslice_total ?
                                info->eu_total / info->subslice_total :
                                0;
        /*
         * CHV supports subslice power gating on devices with more than
         * one subslice, and supports EU power gating on devices with
         * more than one EU pair per subslice.
         */
        info->has_slice_pg = 0;
        info->has_subslice_pg = (info->subslice_total > 1);
        info->has_eu_pg = (info->eu_per_subslice > 2);
}

static void gen9_sseu_info_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_device_info *info;
        int s_max = 3, ss_max = 4, eu_max = 8;
        int s, ss;
        u32 fuse2, s_enable, ss_disable, eu_disable;
        u8 eu_mask = 0xff;

        info = (struct intel_device_info *)&dev_priv->info;
        fuse2 = I915_READ(GEN8_FUSE2);
        s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
                   GEN8_F2_S_ENA_SHIFT;
        ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
                     GEN9_F2_SS_DIS_SHIFT;

        info->slice_total = hweight32(s_enable);
        /*
         * The subslice disable field is global, i.e. it applies
         * to each of the enabled slices.
         */
        info->subslice_per_slice = ss_max - hweight32(ss_disable);
        info->subslice_total = info->slice_total *
                               info->subslice_per_slice;

        /*
         * Iterate through enabled slices and subslices to
         * count the total enabled EU.
         */
        for (s = 0; s < s_max; s++) {
                if (!(s_enable & (0x1 << s)))
                        /* skip disabled slice */
                        continue;

                eu_disable = I915_READ(GEN9_EU_DISABLE(s));
                for (ss = 0; ss < ss_max; ss++) {
                        int eu_per_ss;

                        if (ss_disable & (0x1 << ss))
                                /* skip disabled subslice */
                                continue;

                        eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
                                                      eu_mask);

                        /*
                         * Record which subslice(s) have 7 EUs. We can
                         * tune the hash used to spread work among
                         * subslices if they are unbalanced.
                         */
                        if (eu_per_ss == 7)
                                info->subslice_7eu[s] |= 1 << ss;

                        info->eu_total += eu_per_ss;
                }
        }

        /*
         * SKL is expected to always have a uniform distribution
         * of EU across subslices with the exception that any one
         * EU in any one subslice may be fused off for die
         * recovery. BXT is expected to be perfectly uniform in EU
         * distribution.
         */
        info->eu_per_subslice = info->subslice_total ?
                                DIV_ROUND_UP(info->eu_total,
                                             info->subslice_total) : 0;
        /*
         * SKL supports slice power gating on devices with more than
         * one slice, and supports EU power gating on devices with
         * more than one EU pair per subslice. BXT supports subslice
         * power gating on devices with more than one subslice, and
         * supports EU power gating on devices with more than one EU
         * pair per subslice.
         */
        info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
                               (info->slice_total > 1));
        info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
        info->has_eu_pg = (info->eu_per_subslice > 2);
}

static void broadwell_sseu_info_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_device_info *info;
        const int s_max = 3, ss_max = 3, eu_max = 8;
        int s, ss;
        u32 fuse2, eu_disable[s_max], s_enable, ss_disable;

        fuse2 = I915_READ(GEN8_FUSE2);
        s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
        ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;

        eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
        eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
                        ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
                         (32 - GEN8_EU_DIS0_S1_SHIFT));
        eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
                        ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
                         (32 - GEN8_EU_DIS1_S2_SHIFT));
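        /*
         * The per-slice EU-disable masks straddle the 32-bit fuse
         * registers, so the slice 1 and slice 2 masks above are stitched
         * together from the top bits of one register and the bottom bits
         * of the next.
         */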

        info = (struct intel_device_info *)&dev_priv->info;
        info->slice_total = hweight32(s_enable);

        /*
         * The subslice disable field is global, i.e. it applies
         * to each of the enabled slices.
         */
        info->subslice_per_slice = ss_max - hweight32(ss_disable);
        info->subslice_total = info->slice_total * info->subslice_per_slice;

        /*
         * Iterate through enabled slices and subslices to
         * count the total enabled EU.
         */
        for (s = 0; s < s_max; s++) {
                if (!(s_enable & (0x1 << s)))
                        /* skip disabled slice */
                        continue;

                for (ss = 0; ss < ss_max; ss++) {
                        u32 n_disabled;

                        if (ss_disable & (0x1 << ss))
                                /* skip disabled subslice */
                                continue;

                        n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));

                        /*
                         * Record which subslices have 7 EUs.
                         */
                        if (eu_max - n_disabled == 7)
                                info->subslice_7eu[s] |= 1 << ss;

                        info->eu_total += eu_max - n_disabled;
                }
        }

        /*
         * BDW is expected to always have a uniform distribution of EU across
         * subslices with the exception that any one EU in any one subslice may
         * be fused off for die recovery.
         */
        info->eu_per_subslice = info->subslice_total ?
                DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;

        /*
         * BDW supports slice power gating on devices with more than
         * one slice.
         */
        info->has_slice_pg = (info->slice_total > 1);
        info->has_subslice_pg = 0;
        info->has_eu_pg = 0;
}

/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been set up as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_device_info *info;
        enum pipe pipe;

        info = (struct intel_device_info *)&dev_priv->info;

        /*
         * Skylake and Broxton currently don't expose the topmost plane as its
         * use is exclusive with the legacy cursor and we only want to expose
         * one of those, not both. Until we can safely expose the topmost plane
         * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
         * we don't expose the topmost plane at all to prevent ABI breakage
         * down the line.
         */
        if (IS_BROXTON(dev)) {
                info->num_sprites[PIPE_A] = 2;
                info->num_sprites[PIPE_B] = 2;
                info->num_sprites[PIPE_C] = 1;
        } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 2;
        else
                for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 1;

        if (i915.disable_display) {
                DRM_INFO("Display disabled (module parameter)\n");
                info->num_pipes = 0;
        } else if (info->num_pipes > 0 &&
                   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
                   HAS_PCH_SPLIT(dev)) {
                u32 fuse_strap = I915_READ(FUSE_STRAP);
                u32 sfuse_strap = I915_READ(SFUSE_STRAP);

                /*
                 * SFUSE_STRAP is supposed to have a bit signalling the display
                 * is fused off. Unfortunately it seems that, at least in
                 * certain cases, fused off display means that PCH display
                 * reads don't land anywhere. In that case, we read 0s.
                 *
                 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
                 * should be set when taking over after the firmware.
                 */
                if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
                    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
                    (dev_priv->pch_type == PCH_CPT &&
                     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
                        DRM_INFO("Display fused off, disabling\n");
                        info->num_pipes = 0;
                } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
                        DRM_INFO("PipeC fused off\n");
                        info->num_pipes -= 1;
                }
        } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
                u32 dfsm = I915_READ(SKL_DFSM);
                u8 disabled_mask = 0;
                bool invalid;
                int num_bits;

                if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
                        disabled_mask |= BIT(PIPE_A);
                if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
                        disabled_mask |= BIT(PIPE_B);
                if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
                        disabled_mask |= BIT(PIPE_C);

                num_bits = hweight8(disabled_mask);

                switch (disabled_mask) {
                case BIT(PIPE_A):
                case BIT(PIPE_B):
                case BIT(PIPE_A) | BIT(PIPE_B):
                case BIT(PIPE_A) | BIT(PIPE_C):
                        invalid = true;
                        break;
                default:
                        invalid = false;
                }

                if (num_bits > info->num_pipes || invalid)
                        DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
                                  disabled_mask);
                else
                        info->num_pipes -= num_bits;
        }

        /* Initialize slice/subslice/EU info */
        if (IS_CHERRYVIEW(dev))
                cherryview_sseu_info_init(dev);
        else if (IS_BROADWELL(dev))
                broadwell_sseu_info_init(dev);
        else if (INTEL_INFO(dev)->gen >= 9)
                gen9_sseu_info_init(dev);

        DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
        DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
        DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
        DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
        DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
        DRM_DEBUG_DRIVER("has slice power gating: %s\n",
                         info->has_slice_pg ? "y" : "n");
        DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
                         info->has_subslice_pg ? "y" : "n");
        DRM_DEBUG_DRIVER("has EU power gating: %s\n",
                         info->has_eu_pg ? "y" : "n");
}

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
        /*
         * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
         * CHV x1 PHY (DP/HDMI D)
         * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
         */
        if (IS_CHERRYVIEW(dev_priv)) {
                DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
                DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
        } else if (IS_VALLEYVIEW(dev_priv)) {
                DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
        }
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
        /*
         * The i915 workqueue is primarily used for batched retirement of
         * requests (and thus managing bo) once the task has been completed
         * by the GPU. i915_gem_retire_requests() is called directly when we
         * need high-priority retirement, such as waiting for an explicit
         * bo.
         *
         * It is also used for periodic low-priority events, such as
         * idle-timers and recording error state.
         *
         * All tasks on the workqueue are expected to acquire the dev mutex
         * so there is no point in running more than one instance of the
         * workqueue at any time.  Use an ordered one.
         */
        dev_priv->wq = alloc_ordered_workqueue("i915", 0);
        if (dev_priv->wq == NULL)
                goto out_err;

        dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
        if (dev_priv->hotplug.dp_wq == NULL)
                goto out_free_wq;

        dev_priv->gpu_error.hangcheck_wq =
                alloc_ordered_workqueue("i915-hangcheck", 0);
        if (dev_priv->gpu_error.hangcheck_wq == NULL)
                goto out_free_dp_wq;

        return 0;

out_free_dp_wq:
        destroy_workqueue(dev_priv->hotplug.dp_wq);
out_free_wq:
        destroy_workqueue(dev_priv->wq);
out_err:
        DRM_ERROR("Failed to allocate workqueues.\n");

        return -ENOMEM;
}
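
/*
 * Illustrative (names taken from elsewhere in the driver): work is queued
 * on these queues with the normal workqueue API, e.g.
 *
 *      queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
 *                         round_jiffies_up_relative(HZ));
 *
 * and the ordered queues guarantee the items run one at a time.
 */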

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
        destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
        destroy_workqueue(dev_priv->hotplug.dp_wq);
        destroy_workqueue(dev_priv->wq);
}

static int i915_mmio_setup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        int mmio_bar;
        int mmio_size;

        mmio_bar = IS_GEN2(dev) ? 1 : 0;
        /*
         * Before gen4, the registers and the GTT are behind different BARs.
         * However, from gen4 onwards, the registers and the GTT are shared
         * in the same BAR, so we want to restrict this ioremap from
         * clobbering the GTT which we want ioremap_wc instead. Fortunately,
         * the register BAR remains the same size for all the earlier
         * generations up to Ironlake.
         */
        if (INTEL_INFO(dev)->gen < 5)
                mmio_size = 512 * 1024;
        else
                mmio_size = 2 * 1024 * 1024;
        dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
        if (dev_priv->regs == NULL) {
                DRM_ERROR("failed to map registers\n");

                return -EIO;
        }
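
        /*
         * Note that pci_iomap() with a non-zero maxlen maps only the first
         * mmio_size bytes of the BAR, which is what keeps this mapping away
         * from the GTT aperture that shares the BAR on gen4+.
         */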

        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);

        return 0;
}

static void i915_mmio_cleanup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        intel_teardown_mchbar(dev);
        pci_iounmap(dev->pdev, dev_priv->regs);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct drm_i915_private *dev_priv;
        struct intel_device_info *info, *device_info;
        int ret = 0;
        uint32_t aperture_size;

        info = (struct intel_device_info *) flags;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (dev_priv == NULL)
                return -ENOMEM;

        dev->dev_private = dev_priv;
        dev_priv->dev = dev;

        /* Setup the write-once "constant" device info */
        device_info = (struct intel_device_info *)&dev_priv->info;
        memcpy(device_info, info, sizeof(dev_priv->info));
        device_info->device_id = dev->pdev->device;

        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
        mutex_init(&dev_priv->backlight_lock);
        spin_lock_init(&dev_priv->uncore.lock);
        spin_lock_init(&dev_priv->mm.object_stat_lock);
        spin_lock_init(&dev_priv->mmio_flip_lock);
        mutex_init(&dev_priv->sb_lock);
        mutex_init(&dev_priv->modeset_restore_lock);
        mutex_init(&dev_priv->av_mutex);

        ret = i915_workqueues_init(dev_priv);
        if (ret < 0)
                goto out_free_priv;

        intel_pm_setup(dev);

        intel_runtime_pm_get(dev_priv);

        intel_display_crc_init(dev);

        i915_dump_device_info(dev_priv);

        /* Not all pre-production machines fall into this category, only the
         * very first ones. Almost everything should work, except for maybe
         * suspend/resume. And we don't implement workarounds that affect only
         * pre-production machines. */
        if (IS_HSW_EARLY_SDV(dev))
                DRM_INFO("This is an early pre-production Haswell machine. "
                         "It may not be fully functional.\n");

        if (i915_get_bridge_dev(dev)) {
                ret = -EIO;
                goto out_runtime_pm_put;
        }

        ret = i915_mmio_setup(dev);
        if (ret < 0)
                goto put_bridge;

        /* This must be called before any calls to HAS_PCH_* */
        intel_detect_pch(dev);

        intel_uncore_init(dev);

        ret = i915_gem_gtt_init(dev);
        if (ret)
                goto out_uncore_fini;

        /* WARNING: Apparently we must kick fbdev drivers before vgacon,
         * otherwise the vga fbdev driver falls over. */
        ret = i915_kick_out_firmware_fb(dev_priv);
        if (ret) {
                DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
                goto out_gtt;
        }

        ret = i915_kick_out_vgacon(dev_priv);
        if (ret) {
                DRM_ERROR("failed to remove conflicting VGA console\n");
                goto out_gtt;
        }

        pci_set_master(dev->pdev);

        /* overlay on gen2 is broken and can't address above 1G */
        if (IS_GEN2(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
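        /* DMA_BIT_MASK(30) limits coherent DMA to the low 1 GiB. */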

        /* 965GM sometimes incorrectly writes to hardware status page (HWS)
         * using 32bit addressing, overwriting memory if HWS is located
         * above 4GB.
         *
         * The documentation also mentions an issue with undefined
         * behaviour if any general state is accessed within a page above 4GB,
         * which also needs to be handled carefully.
         */
        if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

        aperture_size = dev_priv->gtt.mappable_end;

        dev_priv->gtt.mappable =
                io_mapping_create_wc(dev_priv->gtt.mappable_base,
                                     aperture_size);
        if (dev_priv->gtt.mappable == NULL) {
                ret = -EIO;
                goto out_gtt;
        }

        dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
                                              aperture_size);

        intel_irq_init(dev_priv);
        intel_uncore_sanitize(dev);

        intel_opregion_setup(dev);

        i915_gem_load_init(dev);
        i915_gem_shrinker_init(dev_priv);

        /* On the 945G/GM, the chipset reports the MSI capability on the
         * integrated graphics even though the support isn't actually there
         * according to the published specs.  It doesn't appear to function
         * correctly in testing on 945G.
         * This may be a side effect of MSI having been made available for PEG
         * and the registers being closely associated.
         *
         * According to chipset errata, on the 965GM, MSI interrupts may
         * be lost or delayed, but we use them anyways to avoid
         * stuck interrupts on some machines.
         */
        if (!IS_I945G(dev) && !IS_I945GM(dev)) {
                if (pci_enable_msi(dev->pdev) < 0)
                        DRM_DEBUG_DRIVER("can't enable MSI");
        }

        intel_device_info_runtime_init(dev);

        intel_init_dpio(dev_priv);

        if (INTEL_INFO(dev)->num_pipes) {
                ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
                if (ret)
                        goto out_gem_unload;
        }

        intel_power_domains_init(dev_priv);

        ret = i915_load_modeset_init(dev);
        if (ret < 0) {
                DRM_ERROR("failed to init modeset\n");
                goto out_power_well;
        }

        /*
         * Notify a valid surface after modesetting,
         * when running inside a VM.
         */
        if (intel_vgpu_active(dev))
                I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

        i915_setup_sysfs(dev);

        if (INTEL_INFO(dev)->num_pipes) {
                /* Must be done after probing outputs */
                intel_opregion_init(dev);
                acpi_video_register();
        }

        if (IS_GEN5(dev))
                intel_gpu_ips_init(dev_priv);

        intel_runtime_pm_enable(dev_priv);

        i915_audio_component_init(dev_priv);

        intel_runtime_pm_put(dev_priv);

        return 0;

out_power_well:
        intel_power_domains_fini(dev_priv);
        drm_vblank_cleanup(dev);
out_gem_unload:
        i915_gem_shrinker_cleanup(dev_priv);

        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);

        intel_teardown_mchbar(dev);
        pm_qos_remove_request(&dev_priv->pm_qos);
        arch_phys_wc_del(dev_priv->gtt.mtrr);
        io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
        i915_global_gtt_cleanup(dev);
out_uncore_fini:
        intel_uncore_fini(dev);
        i915_mmio_cleanup(dev);
put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
        i915_gem_load_cleanup(dev);
out_runtime_pm_put:
        intel_runtime_pm_put(dev_priv);
        i915_workqueues_cleanup(dev_priv);
out_free_priv:
        kfree(dev_priv);

        return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        intel_fbdev_fini(dev);

        i915_audio_component_cleanup(dev_priv);

        ret = i915_gem_suspend(dev);
        if (ret) {
                DRM_ERROR("failed to idle hardware: %d\n", ret);
                return ret;
        }

        intel_power_domains_fini(dev_priv);

        intel_gpu_ips_teardown();

        i915_teardown_sysfs(dev);

        i915_gem_shrinker_cleanup(dev_priv);

        io_mapping_free(dev_priv->gtt.mappable);
        arch_phys_wc_del(dev_priv->gtt.mtrr);

        acpi_video_unregister();

        drm_vblank_cleanup(dev);

        intel_modeset_cleanup(dev);

        /*
         * free the memory space allocated for the child device
         * config parsed from VBT
         */
        if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
                kfree(dev_priv->vbt.child_dev);
                dev_priv->vbt.child_dev = NULL;
                dev_priv->vbt.child_dev_num = 0;
        }
        kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
        dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
        kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
        dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

        vga_switcheroo_unregister_client(dev->pdev);
        vga_client_register(dev->pdev, NULL, NULL, NULL);

        intel_csr_ucode_fini(dev_priv);

        /* Free error state after interrupts are fully disabled. */
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
        i915_destroy_error_state(dev);

        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);

        intel_opregion_fini(dev);

        /* Flush any outstanding unpin_work. */
        flush_workqueue(dev_priv->wq);

        intel_guc_ucode_fini(dev);
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
        intel_fbc_cleanup_cfb(dev_priv);

        pm_qos_remove_request(&dev_priv->pm_qos);

        i915_global_gtt_cleanup(dev);

        intel_uncore_fini(dev);
        i915_mmio_cleanup(dev);

        i915_gem_load_cleanup(dev);
        pci_dev_put(dev_priv->bridge_dev);
        i915_workqueues_cleanup(dev_priv);
        kfree(dev_priv);

        return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
        int ret;

        ret = i915_gem_open(dev, file);
        if (ret)
                return ret;

        return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
        intel_fbdev_restore_mode(dev);
        vga_switcheroo_process_delayed_switch();
}

void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
        mutex_lock(&dev->struct_mutex);
        i915_gem_context_close(dev, file);
        i915_gem_release(dev, file);
        mutex_unlock(&dev->struct_mutex);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        kfree(file_priv);
}

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file)
{
        return -ENODEV;
}

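/*
 * Each DRM_IOCTL_DEF_DRV() entry below binds an i915 ioctl number to its
 * handler plus permission flags; entries marked DRM_RENDER_ALLOW are also
 * reachable through render nodes (/dev/dri/renderD*). For example:
 *
 *      DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam,
 *                        DRM_AUTH|DRM_RENDER_ALLOW)
 */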
const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
        DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
};

int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);