linux/drivers/gpu/drm/i915/i915_drv.h
   1/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
   2 */
   3/*
   4 *
   5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
   6 * All Rights Reserved.
   7 *
   8 * Permission is hereby granted, free of charge, to any person obtaining a
   9 * copy of this software and associated documentation files (the
  10 * "Software"), to deal in the Software without restriction, including
  11 * without limitation the rights to use, copy, modify, merge, publish,
  12 * distribute, sub license, and/or sell copies of the Software, and to
  13 * permit persons to whom the Software is furnished to do so, subject to
  14 * the following conditions:
  15 *
  16 * The above copyright notice and this permission notice (including the
  17 * next paragraph) shall be included in all copies or substantial portions
  18 * of the Software.
  19 *
  20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  27 *
  28 */
  29
  30#ifndef _I915_DRV_H_
  31#define _I915_DRV_H_
  32
  33#include <uapi/drm/i915_drm.h>
  34#include <uapi/drm/drm_fourcc.h>
  35
  36#include <asm/hypervisor.h>
  37
  38#include <linux/io-mapping.h>
  39#include <linux/i2c.h>
  40#include <linux/i2c-algo-bit.h>
  41#include <linux/backlight.h>
  42#include <linux/hash.h>
  43#include <linux/intel-iommu.h>
  44#include <linux/kref.h>
  45#include <linux/mm_types.h>
  46#include <linux/perf_event.h>
  47#include <linux/pm_qos.h>
  48#include <linux/dma-resv.h>
  49#include <linux/shmem_fs.h>
  50#include <linux/stackdepot.h>
  51#include <linux/xarray.h>
  52
  53#include <drm/intel-gtt.h>
  54#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
  55#include <drm/drm_gem.h>
  56#include <drm/drm_auth.h>
  57#include <drm/drm_cache.h>
  58#include <drm/drm_util.h>
  59#include <drm/drm_dsc.h>
  60#include <drm/drm_atomic.h>
  61#include <drm/drm_connector.h>
  62#include <drm/i915_mei_hdcp_interface.h>
  63
  64#include "i915_params.h"
  65#include "i915_reg.h"
  66#include "i915_utils.h"
  67
  68#include "display/intel_bios.h"
  69#include "display/intel_display.h"
  70#include "display/intel_display_power.h"
  71#include "display/intel_dpll_mgr.h"
  72#include "display/intel_dsb.h"
  73#include "display/intel_frontbuffer.h"
  74#include "display/intel_global_state.h"
  75#include "display/intel_gmbus.h"
  76#include "display/intel_opregion.h"
  77
  78#include "gem/i915_gem_context_types.h"
  79#include "gem/i915_gem_shrinker.h"
  80#include "gem/i915_gem_stolen.h"
  81
  82#include "gt/intel_lrc.h"
  83#include "gt/intel_engine.h"
  84#include "gt/intel_gt_types.h"
  85#include "gt/intel_workarounds.h"
  86#include "gt/uc/intel_uc.h"
  87
  88#include "intel_device_info.h"
  89#include "intel_pch.h"
  90#include "intel_runtime_pm.h"
  91#include "intel_memory_region.h"
  92#include "intel_uncore.h"
  93#include "intel_wakeref.h"
  94#include "intel_wopcm.h"
  95
  96#include "i915_gem.h"
  97#include "i915_gem_gtt.h"
  98#include "i915_gpu_error.h"
  99#include "i915_perf_types.h"
 100#include "i915_request.h"
 101#include "i915_scheduler.h"
 102#include "gt/intel_timeline.h"
 103#include "i915_vma.h"
 104#include "i915_irq.h"
 105
 106#include "intel_region_lmem.h"
 107
 108/* General customization:
 109 */
 110
 111#define DRIVER_NAME             "i915"
 112#define DRIVER_DESC             "Intel Graphics"
 113#define DRIVER_DATE             "20200917"
 114#define DRIVER_TIMESTAMP        1600375437
 115
 116struct drm_i915_gem_object;
 117
 118enum hpd_pin {
 119        HPD_NONE = 0,
 120        HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
 121        HPD_CRT,
 122        HPD_SDVO_B,
 123        HPD_SDVO_C,
 124        HPD_PORT_A,
 125        HPD_PORT_B,
 126        HPD_PORT_C,
 127        HPD_PORT_D,
 128        HPD_PORT_E,
 129        HPD_PORT_TC1,
 130        HPD_PORT_TC2,
 131        HPD_PORT_TC3,
 132        HPD_PORT_TC4,
 133        HPD_PORT_TC5,
 134        HPD_PORT_TC6,
 135
 136        HPD_NUM_PINS
 137};
 138
 139#define for_each_hpd_pin(__pin) \
 140        for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
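
/*
 * Editorial illustration (not part of the original header): for_each_hpd_pin()
 * visits every real pin while skipping HPD_NONE, e.g. to reset the per-pin
 * statistics kept in struct i915_hotplug below. dev_priv and the loop
 * variable are assumed context, not defined here.
 *
 *	enum hpd_pin pin;
 *
 *	for_each_hpd_pin(pin)
 *		dev_priv->hotplug.stats[pin].count = 0;
 */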
 141
 142/* Threshold == 5 for long IRQs, 50 for short */
 143#define HPD_STORM_DEFAULT_THRESHOLD 50
 144
 145struct i915_hotplug {
 146        struct delayed_work hotplug_work;
 147
 148        const u32 *hpd, *pch_hpd;
 149
 150        struct {
 151                unsigned long last_jiffies;
 152                int count;
 153                enum {
 154                        HPD_ENABLED = 0,
 155                        HPD_DISABLED = 1,
 156                        HPD_MARK_DISABLED = 2
 157                } state;
 158        } stats[HPD_NUM_PINS];
 159        u32 event_bits;
 160        u32 retry_bits;
 161        struct delayed_work reenable_work;
 162
 163        u32 long_port_mask;
 164        u32 short_port_mask;
 165        struct work_struct dig_port_work;
 166
 167        struct work_struct poll_init_work;
 168        bool poll_enabled;
 169
 170        unsigned int hpd_storm_threshold;
 171        /* Whether or not to count short HPD IRQs in HPD storms */
 172        u8 hpd_short_storm_enabled;
 173
  174        /*
  175         * If we get an HPD irq from DP and an HPD irq from non-DP, the
  176         * non-DP HPD work could block the workqueue while waiting for a
  177         * modeset mutex that userspace has already taken, while userspace
  178         * in turn waits on the DP work to run, which is blocked behind
  179         * the non-DP one. Hence DP gets its own workqueue.
  180         */
 181        struct workqueue_struct *dp_wq;
 182};
 183
 184#define I915_GEM_GPU_DOMAINS \
 185        (I915_GEM_DOMAIN_RENDER | \
 186         I915_GEM_DOMAIN_SAMPLER | \
 187         I915_GEM_DOMAIN_COMMAND | \
 188         I915_GEM_DOMAIN_INSTRUCTION | \
 189         I915_GEM_DOMAIN_VERTEX)
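
/*
 * Editorial illustration (not part of the original header): the mask above
 * simply ORs together every GPU-accessible domain, so a caller can test a
 * requested domain with a plain mask check (write_domain is hypothetical):
 *
 *	if (write_domain & I915_GEM_GPU_DOMAINS)
 *		; /* at least one GPU domain is requested */
 */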
 190
 191struct drm_i915_private;
 192struct i915_mm_struct;
 193struct i915_mmu_object;
 194
 195struct drm_i915_file_private {
 196        struct drm_i915_private *dev_priv;
 197
 198        union {
 199                struct drm_file *file;
 200                struct rcu_head rcu;
 201        };
 202
 203        struct xarray context_xa;
 204        struct xarray vm_xa;
 205
 206        unsigned int bsd_engine;
 207
  208/*
  209 * Every context ban increments the per-client ban score, and hangs in
  210 * short succession also increment it. Once the ban threshold is reached,
  211 * the client is considered banned and submitting more work will fail.
  212 * This is a stop-gap measure to limit a badly behaving client's access
  213 * to the GPU. Note that unbannable contexts never increment the client
  214 * ban score.
  215 */
 216#define I915_CLIENT_SCORE_HANG_FAST     1
 217#define   I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
 218#define I915_CLIENT_SCORE_CONTEXT_BAN   3
 219#define I915_CLIENT_SCORE_BANNED        9
 220        /** ban_score: Accumulated score of all ctx bans and fast hangs. */
 221        atomic_t ban_score;
 222        unsigned long hang_timestamp;
 223};
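
/*
 * Editorial note (not part of the original header), spelling out the score
 * arithmetic implied by the defines above: each context ban adds
 * I915_CLIENT_SCORE_CONTEXT_BAN (3) to ban_score and each hang faster than
 * I915_CLIENT_FAST_HANG_JIFFIES adds I915_CLIENT_SCORE_HANG_FAST (1), so
 * e.g. three context bans are enough to reach I915_CLIENT_SCORE_BANNED (9).
 */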
 224
 225/* Interface history:
 226 *
 227 * 1.1: Original.
 228 * 1.2: Add Power Management
 229 * 1.3: Add vblank support
 230 * 1.4: Fix cmdbuffer path, add heap destroy
 231 * 1.5: Add vblank pipe configuration
 232 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 233 *      - Support vertical blank on secondary display pipe
 234 */
 235#define DRIVER_MAJOR            1
 236#define DRIVER_MINOR            6
 237#define DRIVER_PATCHLEVEL       0
 238
 239struct intel_overlay;
 240struct intel_overlay_error_state;
 241
 242struct sdvo_device_mapping {
 243        u8 initialized;
 244        u8 dvo_port;
 245        u8 slave_addr;
 246        u8 dvo_wiring;
 247        u8 i2c_pin;
 248        u8 ddc_pin;
 249};
 250
 251struct intel_connector;
 252struct intel_encoder;
 253struct intel_atomic_state;
 254struct intel_cdclk_config;
 255struct intel_cdclk_state;
 256struct intel_cdclk_vals;
 257struct intel_initial_plane_config;
 258struct intel_crtc;
 259struct intel_limit;
 260struct dpll;
 261
 262struct drm_i915_display_funcs {
 263        void (*get_cdclk)(struct drm_i915_private *dev_priv,
 264                          struct intel_cdclk_config *cdclk_config);
 265        void (*set_cdclk)(struct drm_i915_private *dev_priv,
 266                          const struct intel_cdclk_config *cdclk_config,
 267                          enum pipe pipe);
 268        int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
 269        int (*get_fifo_size)(struct drm_i915_private *dev_priv,
 270                             enum i9xx_plane_id i9xx_plane);
 271        int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
 272        int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
 273        void (*initial_watermarks)(struct intel_atomic_state *state,
 274                                   struct intel_crtc *crtc);
 275        void (*atomic_update_watermarks)(struct intel_atomic_state *state,
 276                                         struct intel_crtc *crtc);
 277        void (*optimize_watermarks)(struct intel_atomic_state *state,
 278                                    struct intel_crtc *crtc);
 279        int (*compute_global_watermarks)(struct intel_atomic_state *state);
 280        void (*update_wm)(struct intel_crtc *crtc);
 281        int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
 282        u8 (*calc_voltage_level)(int cdclk);
 283        /* Returns the active state of the crtc, and if the crtc is active,
 284         * fills out the pipe-config with the hw state. */
 285        bool (*get_pipe_config)(struct intel_crtc *,
 286                                struct intel_crtc_state *);
 287        void (*get_initial_plane_config)(struct intel_crtc *,
 288                                         struct intel_initial_plane_config *);
 289        int (*crtc_compute_clock)(struct intel_crtc *crtc,
 290                                  struct intel_crtc_state *crtc_state);
 291        void (*crtc_enable)(struct intel_atomic_state *state,
 292                            struct intel_crtc *crtc);
 293        void (*crtc_disable)(struct intel_atomic_state *state,
 294                             struct intel_crtc *crtc);
 295        void (*commit_modeset_enables)(struct intel_atomic_state *state);
 296        void (*commit_modeset_disables)(struct intel_atomic_state *state);
 297        void (*audio_codec_enable)(struct intel_encoder *encoder,
 298                                   const struct intel_crtc_state *crtc_state,
 299                                   const struct drm_connector_state *conn_state);
 300        void (*audio_codec_disable)(struct intel_encoder *encoder,
 301                                    const struct intel_crtc_state *old_crtc_state,
 302                                    const struct drm_connector_state *old_conn_state);
 303        void (*fdi_link_train)(struct intel_crtc *crtc,
 304                               const struct intel_crtc_state *crtc_state);
 305        void (*init_clock_gating)(struct drm_i915_private *dev_priv);
 306        void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
 307        /* clock updates for mode set */
 308        /* cursor updates */
 309        /* render clock increase/decrease */
 310        /* display clock increase/decrease */
 311        /* pll clock increase/decrease */
 312
 313        int (*color_check)(struct intel_crtc_state *crtc_state);
 314        /*
 315         * Program double buffered color management registers during
 316         * vblank evasion. The registers should then latch during the
 317         * next vblank start, alongside any other double buffered registers
 318         * involved with the same commit.
 319         */
 320        void (*color_commit)(const struct intel_crtc_state *crtc_state);
 321        /*
 322         * Load LUTs (and other single buffered color management
 323         * registers). Will (hopefully) be called during the vblank
 324         * following the latching of any double buffered registers
 325         * involved with the same commit.
 326         */
 327        void (*load_luts)(const struct intel_crtc_state *crtc_state);
 328        void (*read_luts)(struct intel_crtc_state *crtc_state);
 329};
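
/*
 * Editorial illustration (not part of the original header): these hooks are
 * filled in per platform at init time and invoked indirectly; e.g. a caller
 * loading the LUTs would typically do something like:
 *
 *	if (dev_priv->display.load_luts)
 *		dev_priv->display.load_luts(crtc_state);
 */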
 330
 331struct intel_csr {
 332        struct work_struct work;
 333        const char *fw_path;
 334        u32 required_version;
 335        u32 max_fw_size; /* bytes */
 336        u32 *dmc_payload;
 337        u32 dmc_fw_size; /* dwords */
 338        u32 version;
 339        u32 mmio_count;
 340        i915_reg_t mmioaddr[20];
 341        u32 mmiodata[20];
 342        u32 dc_state;
 343        u32 target_dc_state;
 344        u32 allowed_dc_mask;
 345        intel_wakeref_t wakeref;
 346};
 347
 348enum i915_cache_level {
 349        I915_CACHE_NONE = 0,
 350        I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
  351        I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
  352                              caches, e.g. sampler/render caches, and the
 353                              large Last-Level-Cache. LLC is coherent with
 354                              the CPU, but L3 is only visible to the GPU. */
 355        I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
 356};
 357
 358#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
 359
 360struct intel_fbc {
 361        /* This is always the inner lock when overlapping with struct_mutex and
 362         * it's the outer lock when overlapping with stolen_lock. */
 363        struct mutex lock;
 364        unsigned threshold;
 365        unsigned int possible_framebuffer_bits;
 366        unsigned int busy_bits;
 367        struct intel_crtc *crtc;
 368
 369        struct drm_mm_node compressed_fb;
 370        struct drm_mm_node *compressed_llb;
 371
 372        bool false_color;
 373
 374        bool active;
 375        bool activated;
 376        bool flip_pending;
 377
 378        bool underrun_detected;
 379        struct work_struct underrun_work;
 380
 381        /*
 382         * Due to the atomic rules we can't access some structures without the
 383         * appropriate locking, so we cache information here in order to avoid
 384         * these problems.
 385         */
 386        struct intel_fbc_state_cache {
 387                struct {
 388                        unsigned int mode_flags;
 389                        u32 hsw_bdw_pixel_rate;
 390                } crtc;
 391
 392                struct {
 393                        unsigned int rotation;
 394                        int src_w;
 395                        int src_h;
 396                        bool visible;
 397                        /*
  398                         * Display surface base address adjustment for
 399                         * pageflips. Note that on gen4+ this only adjusts up
 400                         * to a tile, offsets within a tile are handled in
 401                         * the hw itself (with the TILEOFF register).
 402                         */
 403                        int adjusted_x;
 404                        int adjusted_y;
 405
 406                        u16 pixel_blend_mode;
 407                } plane;
 408
 409                struct {
 410                        const struct drm_format_info *format;
 411                        unsigned int stride;
 412                        u64 modifier;
 413                } fb;
 414
 415                unsigned int fence_y_offset;
 416                u16 gen9_wa_cfb_stride;
 417                u16 interval;
 418                s8 fence_id;
 419        } state_cache;
 420
 421        /*
 422         * This structure contains everything that's relevant to program the
 423         * hardware registers. When we want to figure out if we need to disable
 424         * and re-enable FBC for a new configuration we just check if there's
 425         * something different in the struct. The genx_fbc_activate functions
 426         * are supposed to read from it in order to program the registers.
 427         */
 428        struct intel_fbc_reg_params {
 429                struct {
 430                        enum pipe pipe;
 431                        enum i9xx_plane_id i9xx_plane;
 432                } crtc;
 433
 434                struct {
 435                        const struct drm_format_info *format;
 436                        unsigned int stride;
 437                        u64 modifier;
 438                } fb;
 439
 440                int cfb_size;
 441                unsigned int fence_y_offset;
 442                u16 gen9_wa_cfb_stride;
 443                u16 interval;
 444                s8 fence_id;
 445                bool plane_visible;
 446        } params;
 447
 448        const char *no_fbc_reason;
 449};
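
/*
 * Editorial illustration (not part of the original header): because
 * intel_fbc_reg_params is a plain snapshot of register state, deciding
 * whether a new configuration needs FBC to be disabled and re-enabled can
 * be sketched as a straight comparison (new_params is hypothetical):
 *
 *	if (memcmp(&fbc->params, &new_params, sizeof(new_params)) != 0)
 *		; /* something changed, disable and re-enable FBC */
 */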
 450
 451/*
  452 * HIGH_RR is the highest eDP panel refresh rate read from the EDID.
  453 * LOW_RR is the lowest eDP panel refresh rate found from EDID
  454 * parsing for the same resolution.
 455 */
 456enum drrs_refresh_rate_type {
 457        DRRS_HIGH_RR,
 458        DRRS_LOW_RR,
 459        DRRS_MAX_RR, /* RR count */
 460};
 461
 462enum drrs_support_type {
 463        DRRS_NOT_SUPPORTED = 0,
 464        STATIC_DRRS_SUPPORT = 1,
 465        SEAMLESS_DRRS_SUPPORT = 2
 466};
 467
 468struct intel_dp;
 469struct i915_drrs {
 470        struct mutex mutex;
 471        struct delayed_work work;
 472        struct intel_dp *dp;
 473        unsigned busy_frontbuffer_bits;
 474        enum drrs_refresh_rate_type refresh_rate_type;
 475        enum drrs_support_type type;
 476};
 477
 478struct i915_psr {
 479        struct mutex lock;
 480
 481#define I915_PSR_DEBUG_MODE_MASK        0x0f
 482#define I915_PSR_DEBUG_DEFAULT          0x00
 483#define I915_PSR_DEBUG_DISABLE          0x01
 484#define I915_PSR_DEBUG_ENABLE           0x02
 485#define I915_PSR_DEBUG_FORCE_PSR1       0x03
 486#define I915_PSR_DEBUG_IRQ              0x10
 487
 488        u32 debug;
 489        bool sink_support;
 490        bool enabled;
 491        struct intel_dp *dp;
 492        enum pipe pipe;
 493        enum transcoder transcoder;
 494        bool active;
 495        struct work_struct work;
 496        unsigned busy_frontbuffer_bits;
 497        bool sink_psr2_support;
 498        bool link_standby;
 499        bool colorimetry_support;
 500        bool psr2_enabled;
 501        bool psr2_sel_fetch_enabled;
 502        u8 sink_sync_latency;
 503        ktime_t last_entry_attempt;
 504        ktime_t last_exit;
 505        bool sink_not_reliable;
 506        bool irq_aux_error;
 507        u16 su_x_granularity;
 508        bool dc3co_enabled;
 509        u32 dc3co_exit_delay;
 510        struct delayed_work dc3co_work;
 511        bool force_mode_changed;
 512        struct drm_dp_vsc_sdp vsc;
 513};
 514
 515#define QUIRK_LVDS_SSC_DISABLE (1<<1)
 516#define QUIRK_INVERT_BRIGHTNESS (1<<2)
 517#define QUIRK_BACKLIGHT_PRESENT (1<<3)
 518#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
 519#define QUIRK_INCREASE_T12_DELAY (1<<6)
 520#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
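
/*
 * Editorial illustration (not part of the original header): the QUIRK_*
 * values above are bit flags stored in dev_priv->quirks, so consumers test
 * them with a simple mask, e.g.:
 *
 *	if (dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
 *		; /* invert the backlight brightness scale */
 */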
 521
 522struct intel_fbdev;
 523struct intel_fbc_work;
 524
 525struct intel_gmbus {
 526        struct i2c_adapter adapter;
 527#define GMBUS_FORCE_BIT_RETRY (1U << 31)
 528        u32 force_bit;
 529        u32 reg0;
 530        i915_reg_t gpio_reg;
 531        struct i2c_algo_bit_data bit_algo;
 532        struct drm_i915_private *dev_priv;
 533};
 534
 535struct i915_suspend_saved_registers {
 536        u32 saveDSPARB;
 537        u32 saveSWF0[16];
 538        u32 saveSWF1[16];
 539        u32 saveSWF3[3];
 540        u16 saveGCDGMBUS;
 541};
 542
 543struct vlv_s0ix_state;
 544
 545#define MAX_L3_SLICES 2
 546struct intel_l3_parity {
 547        u32 *remap_info[MAX_L3_SLICES];
 548        struct work_struct error_work;
 549        int which_slice;
 550};
 551
 552struct i915_gem_mm {
 553        /** Memory allocator for GTT stolen memory */
 554        struct drm_mm stolen;
 555        /** Protects the usage of the GTT stolen memory allocator. This is
 556         * always the inner lock when overlapping with struct_mutex. */
 557        struct mutex stolen_lock;
 558
 559        /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
 560        spinlock_t obj_lock;
 561
 562        /**
 563         * List of objects which are purgeable.
 564         */
 565        struct list_head purge_list;
 566
 567        /**
 568         * List of objects which have allocated pages and are shrinkable.
 569         */
 570        struct list_head shrink_list;
 571
 572        /**
 573         * List of objects which are pending destruction.
 574         */
 575        struct llist_head free_list;
 576        struct work_struct free_work;
 577        /**
  578         * Count of objects pending destruction. Used to skip needlessly
 579         * waiting on an RCU barrier if no objects are waiting to be freed.
 580         */
 581        atomic_t free_count;
 582
 583        /**
 584         * tmpfs instance used for shmem backed objects
 585         */
 586        struct vfsmount *gemfs;
 587
 588        struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];
 589
 590        struct notifier_block oom_notifier;
 591        struct notifier_block vmap_notifier;
 592        struct shrinker shrinker;
 593
 594        /**
 595         * Workqueue to fault in userptr pages, flushed by the execbuf
 596         * when required but otherwise left to userspace to try again
 597         * on EAGAIN.
 598         */
 599        struct workqueue_struct *userptr_wq;
 600
 601        /* shrinker accounting, also useful for userland debugging */
 602        u64 shrink_memory;
 603        u32 shrink_count;
 604};
 605
 606#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
 607
 608unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
 609                                         u64 context);
 610
 611static inline unsigned long
 612i915_fence_timeout(const struct drm_i915_private *i915)
 613{
 614        return i915_fence_context_timeout(i915, U64_MAX);
 615}
 616
  617/* Number of SAGV/QGV points, BSpec precisely defines this */
 618#define I915_NUM_QGV_POINTS 8
 619
 620struct ddi_vbt_port_info {
 621        /* Non-NULL if port present. */
 622        const struct child_device_config *child;
 623
 624        int max_tmds_clock;
 625
 626        /* This is an index in the HDMI/DVI DDI buffer translation table. */
 627        u8 hdmi_level_shift;
 628        u8 hdmi_level_shift_set:1;
 629
 630        u8 supports_dvi:1;
 631        u8 supports_hdmi:1;
 632        u8 supports_dp:1;
 633        u8 supports_edp:1;
 634        u8 supports_typec_usb:1;
 635        u8 supports_tbt:1;
 636
 637        u8 alternate_aux_channel;
 638        u8 alternate_ddc_pin;
 639
 640        u8 dp_boost_level;
 641        u8 hdmi_boost_level;
 642        int dp_max_link_rate;           /* 0 for not limited by VBT */
 643};
 644
 645enum psr_lines_to_wait {
 646        PSR_0_LINES_TO_WAIT = 0,
 647        PSR_1_LINE_TO_WAIT,
 648        PSR_4_LINES_TO_WAIT,
 649        PSR_8_LINES_TO_WAIT
 650};
 651
 652struct intel_vbt_data {
 653        struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 654        struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 655
 656        /* Feature bits */
 657        unsigned int int_tv_support:1;
 658        unsigned int lvds_dither:1;
 659        unsigned int int_crt_support:1;
 660        unsigned int lvds_use_ssc:1;
 661        unsigned int int_lvds_support:1;
 662        unsigned int display_clock_mode:1;
 663        unsigned int fdi_rx_polarity_inverted:1;
 664        unsigned int panel_type:4;
 665        int lvds_ssc_freq;
 666        unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
 667        enum drm_panel_orientation orientation;
 668
 669        enum drrs_support_type drrs_type;
 670
 671        struct {
 672                int rate;
 673                int lanes;
 674                int preemphasis;
 675                int vswing;
 676                bool low_vswing;
 677                bool initialized;
 678                int bpp;
 679                struct edp_power_seq pps;
 680                bool hobl;
 681        } edp;
 682
 683        struct {
 684                bool enable;
 685                bool full_link;
 686                bool require_aux_wakeup;
 687                int idle_frames;
 688                enum psr_lines_to_wait lines_to_wait;
 689                int tp1_wakeup_time_us;
 690                int tp2_tp3_wakeup_time_us;
 691                int psr2_tp2_tp3_wakeup_time_us;
 692        } psr;
 693
 694        struct {
 695                u16 pwm_freq_hz;
 696                bool present;
 697                bool active_low_pwm;
 698                u8 min_brightness;      /* min_brightness/255 of max */
 699                u8 controller;          /* brightness controller number */
 700                enum intel_backlight_type type;
 701        } backlight;
 702
 703        /* MIPI DSI */
 704        struct {
 705                u16 panel_id;
 706                struct mipi_config *config;
 707                struct mipi_pps_data *pps;
 708                u16 bl_ports;
 709                u16 cabc_ports;
 710                u8 seq_version;
 711                u32 size;
 712                u8 *data;
 713                const u8 *sequence[MIPI_SEQ_MAX];
 714                u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
 715                enum drm_panel_orientation orientation;
 716        } dsi;
 717
 718        int crt_ddc_pin;
 719
 720        struct list_head display_devices;
 721
 722        struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
 723        struct sdvo_device_mapping sdvo_mappings[2];
 724};
 725
 726enum intel_ddb_partitioning {
 727        INTEL_DDB_PART_1_2,
 728        INTEL_DDB_PART_5_6, /* IVB+ */
 729};
 730
 731struct ilk_wm_values {
 732        u32 wm_pipe[3];
 733        u32 wm_lp[3];
 734        u32 wm_lp_spr[3];
 735        bool enable_fbc_wm;
 736        enum intel_ddb_partitioning partitioning;
 737};
 738
 739struct g4x_pipe_wm {
 740        u16 plane[I915_MAX_PLANES];
 741        u16 fbc;
 742};
 743
 744struct g4x_sr_wm {
 745        u16 plane;
 746        u16 cursor;
 747        u16 fbc;
 748};
 749
 750struct vlv_wm_ddl_values {
 751        u8 plane[I915_MAX_PLANES];
 752};
 753
 754struct vlv_wm_values {
 755        struct g4x_pipe_wm pipe[3];
 756        struct g4x_sr_wm sr;
 757        struct vlv_wm_ddl_values ddl[3];
 758        u8 level;
 759        bool cxsr;
 760};
 761
 762struct g4x_wm_values {
 763        struct g4x_pipe_wm pipe[2];
 764        struct g4x_sr_wm sr;
 765        struct g4x_sr_wm hpll;
 766        bool cxsr;
 767        bool hpll_en;
 768        bool fbc_en;
 769};
 770
 771struct skl_ddb_entry {
 772        u16 start, end; /* in number of blocks, 'end' is exclusive */
 773};
 774
 775static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
 776{
 777        return entry->end - entry->start;
 778}
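
/*
 * Editorial example (not part of the original header): since 'end' is
 * exclusive, an entry of { .start = 16, .end = 48 } spans blocks 16..47 and
 * skl_ddb_entry_size() returns 48 - 16 = 32.
 */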
 779
 780static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
 781                                       const struct skl_ddb_entry *e2)
 782{
 783        if (e1->start == e2->start && e1->end == e2->end)
 784                return true;
 785
 786        return false;
 787}
 788
 789struct i915_frontbuffer_tracking {
 790        spinlock_t lock;
 791
 792        /*
  793         * Tracking bits for delayed frontbuffer flushing due to gpu activity or
 794         * scheduled flips.
 795         */
 796        unsigned busy_bits;
 797        unsigned flip_bits;
 798};
 799
 800struct i915_virtual_gpu {
 801        struct mutex lock; /* serialises sending of g2v_notify command pkts */
 802        bool active;
 803        u32 caps;
 804};
 805
 806struct intel_cdclk_config {
 807        unsigned int cdclk, vco, ref, bypass;
 808        u8 voltage_level;
 809};
 810
 811struct i915_selftest_stash {
 812        atomic_t counter;
 813};
 814
 815struct drm_i915_private {
 816        struct drm_device drm;
 817
 818        /* FIXME: Device release actions should all be moved to drmm_ */
 819        bool do_release;
 820
 821        /* i915 device parameters */
 822        struct i915_params params;
 823
 824        const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
 825        struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
 826        struct intel_driver_caps caps;
 827
 828        /**
 829         * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
 830         * end of stolen which we can optionally use to create GEM objects
 831         * backed by stolen memory. Note that stolen_usable_size tells us
 832         * exactly how much of this we are actually allowed to use, given that
 833         * some portion of it is in fact reserved for use by hardware functions.
 834         */
 835        struct resource dsm;
 836        /**
  837         * Reserved portion of Data Stolen Memory
 838         */
 839        struct resource dsm_reserved;
 840
 841        /*
 842         * Stolen memory is segmented in hardware with different portions
  843         * off-limits to certain functions.
 844         *
 845         * The drm_mm is initialised to the total accessible range, as found
 846         * from the PCI config. On Broadwell+, this is further restricted to
 847         * avoid the first page! The upper end of stolen memory is reserved for
 848         * hardware functions and similarly removed from the accessible range.
 849         */
 850        resource_size_t stolen_usable_size;     /* Total size minus reserved ranges */
 851
 852        struct intel_uncore uncore;
 853        struct intel_uncore_mmio_debug mmio_debug;
 854
 855        struct i915_virtual_gpu vgpu;
 856
 857        struct intel_gvt *gvt;
 858
 859        struct intel_wopcm wopcm;
 860
 861        struct intel_csr csr;
 862
 863        struct intel_gmbus gmbus[GMBUS_NUM_PINS];
 864
 865        /** gmbus_mutex protects against concurrent usage of the single hw gmbus
 866         * controller on different i2c buses. */
 867        struct mutex gmbus_mutex;
 868
 869        /**
 870         * Base address of where the gmbus and gpio blocks are located (either
 871         * on PCH or on SoC for platforms without PCH).
 872         */
 873        u32 gpio_mmio_base;
 874
 875        u32 hsw_psr_mmio_adjust;
 876
 877        /* MMIO base address for MIPI regs */
 878        u32 mipi_mmio_base;
 879
 880        u32 pps_mmio_base;
 881
 882        wait_queue_head_t gmbus_wait_queue;
 883
 884        struct pci_dev *bridge_dev;
 885
 886        struct rb_root uabi_engines;
 887
 888        struct resource mch_res;
 889
 890        /* protects the irq masks */
 891        spinlock_t irq_lock;
 892
 893        bool display_irqs_enabled;
 894
 895        /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
 896        struct pm_qos_request pm_qos;
 897
 898        /* Sideband mailbox protection */
 899        struct mutex sb_lock;
 900        struct pm_qos_request sb_qos;
 901
 902        /** Cached value of IMR to avoid reads in updating the bitfield */
 903        union {
 904                u32 irq_mask;
 905                u32 de_irq_mask[I915_MAX_PIPES];
 906        };
 907        u32 pipestat_irq_mask[I915_MAX_PIPES];
 908
 909        struct i915_hotplug hotplug;
 910        struct intel_fbc fbc;
 911        struct i915_drrs drrs;
 912        struct intel_opregion opregion;
 913        struct intel_vbt_data vbt;
 914
 915        bool preserve_bios_swizzle;
 916
 917        /* overlay */
 918        struct intel_overlay *overlay;
 919
 920        /* backlight registers and fields in struct intel_panel */
 921        struct mutex backlight_lock;
 922
 923        /* protects panel power sequencer state */
 924        struct mutex pps_mutex;
 925
 926        unsigned int fsb_freq, mem_freq, is_ddr3;
 927        unsigned int skl_preferred_vco_freq;
 928        unsigned int max_cdclk_freq;
 929
 930        unsigned int max_dotclk_freq;
 931        unsigned int hpll_freq;
 932        unsigned int fdi_pll_freq;
 933        unsigned int czclk_freq;
 934
 935        struct {
 936                /* The current hardware cdclk configuration */
 937                struct intel_cdclk_config hw;
 938
 939                /* cdclk, divider, and ratio table from bspec */
 940                const struct intel_cdclk_vals *table;
 941
 942                struct intel_global_obj obj;
 943        } cdclk;
 944
 945        struct {
 946                /* The current hardware dbuf configuration */
 947                u8 enabled_slices;
 948
 949                struct intel_global_obj obj;
 950        } dbuf;
 951
 952        /**
 953         * wq - Driver workqueue for GEM.
 954         *
 955         * NOTE: Work items scheduled here are not allowed to grab any modeset
 956         * locks, for otherwise the flushing done in the pageflip code will
 957         * result in deadlocks.
 958         */
 959        struct workqueue_struct *wq;
 960
 961        /* ordered wq for modesets */
 962        struct workqueue_struct *modeset_wq;
 963        /* unbound hipri wq for page flips/plane updates */
 964        struct workqueue_struct *flip_wq;
 965
 966        /* Display functions */
 967        struct drm_i915_display_funcs display;
 968
 969        /* PCH chipset type */
 970        enum intel_pch pch_type;
 971        unsigned short pch_id;
 972
 973        unsigned long quirks;
 974
 975        struct drm_atomic_state *modeset_restore_state;
 976        struct drm_modeset_acquire_ctx reset_ctx;
 977
 978        struct i915_ggtt ggtt; /* VM representing the global address space */
 979
 980        struct i915_gem_mm mm;
 981        DECLARE_HASHTABLE(mm_structs, 7);
 982        spinlock_t mm_lock;
 983
 984        /* Kernel Modesetting */
 985
 986        struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
 987        struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
 988
 989        /**
  990         * dpll and cdclk state is protected by connection_mutex.
 991         * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
 992         * Must be global rather than per dpll, because on some platforms plls
 993         * share registers.
 994         */
 995        struct {
 996                struct mutex lock;
 997
 998                int num_shared_dpll;
 999                struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
1000                const struct intel_dpll_mgr *mgr;
1001
1002                struct {
1003                        int nssc;
1004                        int ssc;
1005                } ref_clks;
1006        } dpll;
1007
1008        struct list_head global_obj_list;
1009
1010        /*
 1011         * For reading active_pipes, holding any crtc lock is
 1012         * sufficient; for writing, all of them must be held.
1013         */
1014        u8 active_pipes;
1015
1016        struct i915_wa_list gt_wa_list;
1017
1018        struct i915_frontbuffer_tracking fb_tracking;
1019
1020        struct intel_atomic_helper {
1021                struct llist_head free_list;
1022                struct work_struct free_work;
1023        } atomic_helper;
1024
1025        bool mchbar_need_disable;
1026
1027        struct intel_l3_parity l3_parity;
1028
1029        /*
1030         * HTI (aka HDPORT) state read during initial hw readout.  Most
1031         * platforms don't have HTI, so this will just stay 0.  Those that do
1032         * will use this later to figure out which PLLs and PHYs are unavailable
1033         * for driver usage.
1034         */
1035        u32 hti_state;
1036
1037        /*
1038         * edram size in MB.
1039         * Cannot be determined by PCIID. You must always read a register.
1040         */
1041        u32 edram_size_mb;
1042
1043        struct i915_power_domains power_domains;
1044
1045        struct i915_psr psr;
1046
1047        struct i915_gpu_error gpu_error;
1048
1049        struct drm_i915_gem_object *vlv_pctx;
1050
 1051        /* fbdev registered on this device */
1052        struct intel_fbdev *fbdev;
1053        struct work_struct fbdev_suspend_work;
1054
1055        struct drm_property *broadcast_rgb_property;
1056        struct drm_property *force_audio_property;
1057
1058        /* hda/i915 audio component */
1059        struct i915_audio_component *audio_component;
1060        bool audio_component_registered;
1061        /**
1062         * av_mutex - mutex for audio/video sync
1063         *
1064         */
1065        struct mutex av_mutex;
1066        int audio_power_refcount;
1067        u32 audio_freq_cntrl;
1068
1069        u32 fdi_rx_config;
1070
1071        /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
1072        u32 chv_phy_control;
1073        /*
1074         * Shadows for CHV DPLL_MD regs to keep the state
 1075         * checker somewhat working in the presence of hardware
1076         * crappiness (can't read out DPLL_MD for pipes B & C).
1077         */
1078        u32 chv_dpll_md[I915_MAX_PIPES];
1079        u32 bxt_phy_grc;
1080
1081        u32 suspend_count;
1082        bool power_domains_suspended;
1083        struct i915_suspend_saved_registers regfile;
1084        struct vlv_s0ix_state *vlv_s0ix_state;
1085
1086        enum {
1087                I915_SAGV_UNKNOWN = 0,
1088                I915_SAGV_DISABLED,
1089                I915_SAGV_ENABLED,
1090                I915_SAGV_NOT_CONTROLLED
1091        } sagv_status;
1092
1093        u32 sagv_block_time_us;
1094
1095        struct {
1096                /*
1097                 * Raw watermark latency values:
1098                 * in 0.1us units for WM0,
1099                 * in 0.5us units for WM1+.
1100                 */
1101                /* primary */
1102                u16 pri_latency[5];
1103                /* sprite */
1104                u16 spr_latency[5];
1105                /* cursor */
1106                u16 cur_latency[5];
1107                /*
1108                 * Raw watermark memory latency values
1109                 * for SKL for all 8 levels
1110                 * in 1us units.
1111                 */
1112                u16 skl_latency[8];
1113
1114                /* current hardware state */
1115                union {
1116                        struct ilk_wm_values hw;
1117                        struct vlv_wm_values vlv;
1118                        struct g4x_wm_values g4x;
1119                };
1120
1121                u8 max_level;
1122
1123                /*
1124                 * Should be held around atomic WM register writing; also
 1125         * protects intel_crtc->wm.active and
1126                 * crtc_state->wm.need_postvbl_update.
1127                 */
1128                struct mutex wm_mutex;
1129
1130                /*
1131                 * Set during HW readout of watermarks/DDB.  Some platforms
1132                 * need to know when we're still using BIOS-provided values
1133                 * (which we don't fully trust).
1134                 *
1135                 * FIXME get rid of this.
1136                 */
1137                bool distrust_bios_wm;
1138        } wm;
1139
1140        struct dram_info {
1141                bool valid;
1142                bool is_16gb_dimm;
1143                u8 num_channels;
1144                u8 ranks;
1145                u32 bandwidth_kbps;
1146                bool symmetric_memory;
1147                enum intel_dram_type {
1148                        INTEL_DRAM_UNKNOWN,
1149                        INTEL_DRAM_DDR3,
1150                        INTEL_DRAM_DDR4,
1151                        INTEL_DRAM_LPDDR3,
1152                        INTEL_DRAM_LPDDR4
1153                } type;
1154        } dram_info;
1155
1156        struct intel_bw_info {
1157                /* for each QGV point */
1158                unsigned int deratedbw[I915_NUM_QGV_POINTS];
1159                u8 num_qgv_points;
1160                u8 num_planes;
1161        } max_bw[6];
1162
1163        struct intel_global_obj bw_obj;
1164
1165        struct intel_runtime_pm runtime_pm;
1166
1167        struct i915_perf perf;
1168
1169        /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
1170        struct intel_gt gt;
1171
1172        struct {
1173                struct i915_gem_contexts {
1174                        spinlock_t lock; /* locks list */
1175                        struct list_head list;
1176
1177                        struct llist_head free_list;
1178                        struct work_struct free_work;
1179                } contexts;
1180
1181                /*
 1182                 * We replace the local file with a global mapping as the
1183                 * backing storage for the mmap is on the device and not
1184                 * on the struct file, and we do not want to prolong the
1185                 * lifetime of the local fd. To minimise the number of
1186                 * anonymous inodes we create, we use a global singleton to
1187                 * share the global mapping.
1188                 */
1189                struct file *mmap_singleton;
1190        } gem;
1191
1192        u8 pch_ssc_use;
1193
1194        /* For i915gm/i945gm vblank irq workaround */
1195        u8 vblank_enabled;
1196
1197        /* perform PHY state sanity checks? */
1198        bool chv_phy_assert[2];
1199
1200        bool ipc_enabled;
1201
1202        /* Used to save the pipe-to-encoder mapping for audio */
1203        struct intel_encoder *av_enc_map[I915_MAX_PIPES];
1204
1205        /* necessary resource sharing with HDMI LPE audio driver. */
1206        struct {
1207                struct platform_device *platdev;
1208                int     irq;
1209        } lpe_audio;
1210
1211        struct i915_pmu pmu;
1212
1213        struct i915_hdcp_comp_master *hdcp_master;
1214        bool hdcp_comp_added;
1215
1216        /* Mutex to protect the above hdcp component related values. */
1217        struct mutex hdcp_comp_mutex;
1218
1219        I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)
1220
1221        /*
1222         * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
1223         * will be rejected. Instead look for a better place.
1224         */
1225};
1226
1227static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
1228{
1229        return container_of(dev, struct drm_i915_private, drm);
1230}
1231
1232static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
1233{
1234        return dev_get_drvdata(kdev);
1235}
1236
1237static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
1238{
1239        return pci_get_drvdata(pdev);
1240}
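
/*
 * Editorial illustration (not part of the original header): these helpers
 * recover the driver private from whichever handle a callback happens to
 * receive, e.g. from a struct drm_device:
 *
 *	struct drm_i915_private *i915 = to_i915(dev);
 */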
1241
1242/* Simple iterator over all initialised engines */
 1243#define for_each_engine(engine__, gt__, id__) \
 1244        for ((id__) = 0; \
 1245             (id__) < I915_NUM_ENGINES; \
 1246             (id__)++) \
 1247                for_each_if ((engine__) = (gt__)->engine[(id__)])
1248
1249/* Iterator over subset of engines selected by mask */
1250#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
1251        for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
1252             (tmp__) ? \
1253             ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
1254             0;)
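
/*
 * Editorial illustration (not part of the original header): walking all
 * initialised engines of a GT, e.g. to count them. Here gt is assumed to be
 * a struct intel_gt pointer such as &i915->gt.
 *
 *	struct intel_engine_cs *engine;
 *	enum intel_engine_id id;
 *	unsigned int nengine = 0;
 *
 *	for_each_engine(engine, gt, id)
 *		nengine++;
 */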
1255
1256#define rb_to_uabi_engine(rb) \
1257        rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
1258
1259#define for_each_uabi_engine(engine__, i915__) \
1260        for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
1261             (engine__); \
1262             (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
1263
1264#define for_each_uabi_class_engine(engine__, class__, i915__) \
1265        for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
1266             (engine__) && (engine__)->uabi_class == (class__); \
1267             (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
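
/*
 * Editorial illustration (not part of the original header): the uabi
 * iterators walk the engines in the order they are exposed to userspace,
 * e.g. to log their names:
 *
 *	struct intel_engine_cs *engine;
 *
 *	for_each_uabi_engine(engine, i915)
 *		drm_dbg(&i915->drm, "%s\n", engine->name);
 */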
1268
1269#define I915_GTT_OFFSET_NONE ((u32)-1)
1270
1271/*
1272 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
1273 * considered to be the frontbuffer for the given plane interface-wise. This
1274 * doesn't mean that the hw necessarily already scans it out, but that any
1275 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
1276 *
1277 * We have one bit per pipe and per scanout plane type.
1278 */
1279#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
1280#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
1281        BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
1282        BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
1283        BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
1284})
1285#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
1286        BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
1287#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
1288        GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
1289                INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
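
/*
 * Editorial example (not part of the original header): with 8 bits per
 * pipe, plane_id 0 on pipe 0 maps to bit 0, plane_id 2 on pipe 1 maps to
 * bit 8 * 1 + 2 = 10, INTEL_FRONTBUFFER_OVERLAY(1) is bit 15 (the top bit
 * of pipe 1's byte), and INTEL_FRONTBUFFER_ALL_MASK(1) covers bits 8..15.
 */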
1290
1291#define INTEL_INFO(dev_priv)    (&(dev_priv)->__info)
1292#define RUNTIME_INFO(dev_priv)  (&(dev_priv)->__runtime)
1293#define DRIVER_CAPS(dev_priv)   (&(dev_priv)->caps)
1294
1295#define INTEL_GEN(dev_priv)     (INTEL_INFO(dev_priv)->gen)
1296#define INTEL_DEVID(dev_priv)   (RUNTIME_INFO(dev_priv)->device_id)
1297
1298#define REVID_FOREVER           0xff
1299#define INTEL_REVID(dev_priv)   ((dev_priv)->drm.pdev->revision)
1300
1301#define INTEL_GEN_MASK(s, e) ( \
1302        BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
1303        BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
1304        GENMASK((e) - 1, (s) - 1))
1305
1306/* Returns true if Gen is in inclusive range [Start, End] */
1307#define IS_GEN_RANGE(dev_priv, s, e) \
1308        (!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))
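
/*
 * Editorial example (not part of the original header): INTEL_GEN_MASK(9, 11)
 * expands to GENMASK(10, 8), i.e. bits 8..10, so IS_GEN_RANGE(dev_priv, 9, 11)
 * is true exactly when the device's gen_mask has the gen 9, 10 or 11 bit set,
 * while IS_GEN(dev_priv, 9) compares the gen number directly.
 */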
1309
1310#define IS_GEN(dev_priv, n) \
1311        (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
1312         INTEL_INFO(dev_priv)->gen == (n))
1313
1314#define HAS_DSB(dev_priv)       (INTEL_INFO(dev_priv)->display.has_dsb)
1315
1316/*
1317 * Return true if revision is in range [since,until] inclusive.
1318 *
1319 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
1320 */
1321#define IS_REVID(p, since, until) \
1322        (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
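
/*
 * Editorial example (not part of the original header): the SKL_REVID_* /
 * BXT_REVID_* values further down plug straight into IS_REVID(), e.g.
 * IS_REVID(p, 0, REVID_FOREVER) matches every revision, while a workaround
 * limited to early steppings might use
 * IS_SKL_REVID(p, SKL_REVID_A0, SKL_REVID_C0).
 */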
1323
1324static __always_inline unsigned int
1325__platform_mask_index(const struct intel_runtime_info *info,
1326                      enum intel_platform p)
1327{
1328        const unsigned int pbits =
1329                BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
1330
1331        /* Expand the platform_mask array if this fails. */
1332        BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
1333                     pbits * ARRAY_SIZE(info->platform_mask));
1334
1335        return p / pbits;
1336}
1337
1338static __always_inline unsigned int
1339__platform_mask_bit(const struct intel_runtime_info *info,
1340                    enum intel_platform p)
1341{
1342        const unsigned int pbits =
1343                BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
1344
1345        return p % pbits + INTEL_SUBPLATFORM_BITS;
1346}
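
/*
 * Editorial note (not part of the original header): with
 * pbits = BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS
 * usable bits per array word, platform p lives in word p / pbits at bit
 * (p % pbits) + INTEL_SUBPLATFORM_BITS, leaving the low
 * INTEL_SUBPLATFORM_BITS bits of each word free for the subplatform flags
 * that intel_subplatform() and IS_SUBPLATFORM() below extract.
 */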
1347
1348static inline u32
1349intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
1350{
1351        const unsigned int pi = __platform_mask_index(info, p);
1352
 1353        return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
1354}
1355
1356static __always_inline bool
1357IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
1358{
1359        const struct intel_runtime_info *info = RUNTIME_INFO(i915);
1360        const unsigned int pi = __platform_mask_index(info, p);
1361        const unsigned int pb = __platform_mask_bit(info, p);
1362
1363        BUILD_BUG_ON(!__builtin_constant_p(p));
1364
1365        return info->platform_mask[pi] & BIT(pb);
1366}
1367
1368static __always_inline bool
1369IS_SUBPLATFORM(const struct drm_i915_private *i915,
1370               enum intel_platform p, unsigned int s)
1371{
1372        const struct intel_runtime_info *info = RUNTIME_INFO(i915);
1373        const unsigned int pi = __platform_mask_index(info, p);
1374        const unsigned int pb = __platform_mask_bit(info, p);
1375        const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
1376        const u32 mask = info->platform_mask[pi];
1377
1378        BUILD_BUG_ON(!__builtin_constant_p(p));
1379        BUILD_BUG_ON(!__builtin_constant_p(s));
1380        BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);
1381
1382        /* Shift and test on the MSB position so sign flag can be used. */
1383        return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
1384}
1385
1386#define IS_MOBILE(dev_priv)     (INTEL_INFO(dev_priv)->is_mobile)
1387#define IS_DGFX(dev_priv)   (INTEL_INFO(dev_priv)->is_dgfx)
1388
1389#define IS_I830(dev_priv)       IS_PLATFORM(dev_priv, INTEL_I830)
1390#define IS_I845G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I845G)
1391#define IS_I85X(dev_priv)       IS_PLATFORM(dev_priv, INTEL_I85X)
1392#define IS_I865G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I865G)
1393#define IS_I915G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I915G)
1394#define IS_I915GM(dev_priv)     IS_PLATFORM(dev_priv, INTEL_I915GM)
1395#define IS_I945G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I945G)
1396#define IS_I945GM(dev_priv)     IS_PLATFORM(dev_priv, INTEL_I945GM)
1397#define IS_I965G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I965G)
1398#define IS_I965GM(dev_priv)     IS_PLATFORM(dev_priv, INTEL_I965GM)
1399#define IS_G45(dev_priv)        IS_PLATFORM(dev_priv, INTEL_G45)
1400#define IS_GM45(dev_priv)       IS_PLATFORM(dev_priv, INTEL_GM45)
1401#define IS_G4X(dev_priv)        (IS_G45(dev_priv) || IS_GM45(dev_priv))
1402#define IS_PINEVIEW(dev_priv)   IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
1403#define IS_G33(dev_priv)        IS_PLATFORM(dev_priv, INTEL_G33)
1404#define IS_IRONLAKE(dev_priv)   IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
1405#define IS_IRONLAKE_M(dev_priv) \
1406        (IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
1407#define IS_IVYBRIDGE(dev_priv)  IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
1408#define IS_IVB_GT1(dev_priv)    (IS_IVYBRIDGE(dev_priv) && \
1409                                 INTEL_INFO(dev_priv)->gt == 1)
1410#define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
1411#define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
1412#define IS_HASWELL(dev_priv)    IS_PLATFORM(dev_priv, INTEL_HASWELL)
1413#define IS_BROADWELL(dev_priv)  IS_PLATFORM(dev_priv, INTEL_BROADWELL)
1414#define IS_SKYLAKE(dev_priv)    IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
1415#define IS_BROXTON(dev_priv)    IS_PLATFORM(dev_priv, INTEL_BROXTON)
1416#define IS_KABYLAKE(dev_priv)   IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
1417#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
1418#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
1419#define IS_COMETLAKE(dev_priv)  IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
1420#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
1421#define IS_ICELAKE(dev_priv)    IS_PLATFORM(dev_priv, INTEL_ICELAKE)
1422#define IS_ELKHARTLAKE(dev_priv)        IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
1423#define IS_TIGERLAKE(dev_priv)  IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
1424#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
1425#define IS_DG1(dev_priv)        IS_PLATFORM(dev_priv, INTEL_DG1)
1426#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
1427                                    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
1428#define IS_BDW_ULT(dev_priv) \
1429        IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
1430#define IS_BDW_ULX(dev_priv) \
1431        IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
1432#define IS_BDW_GT3(dev_priv)    (IS_BROADWELL(dev_priv) && \
1433                                 INTEL_INFO(dev_priv)->gt == 3)
1434#define IS_HSW_ULT(dev_priv) \
1435        IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
1436#define IS_HSW_GT3(dev_priv)    (IS_HASWELL(dev_priv) && \
1437                                 INTEL_INFO(dev_priv)->gt == 3)
1438#define IS_HSW_GT1(dev_priv)    (IS_HASWELL(dev_priv) && \
1439                                 INTEL_INFO(dev_priv)->gt == 1)
1440/* ULX machines are also considered ULT. */
1441#define IS_HSW_ULX(dev_priv) \
1442        IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
1443#define IS_SKL_ULT(dev_priv) \
1444        IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
1445#define IS_SKL_ULX(dev_priv) \
1446        IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
1447#define IS_KBL_ULT(dev_priv) \
1448        IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
1449#define IS_KBL_ULX(dev_priv) \
1450        IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
1451#define IS_SKL_GT2(dev_priv)    (IS_SKYLAKE(dev_priv) && \
1452                                 INTEL_INFO(dev_priv)->gt == 2)
1453#define IS_SKL_GT3(dev_priv)    (IS_SKYLAKE(dev_priv) && \
1454                                 INTEL_INFO(dev_priv)->gt == 3)
1455#define IS_SKL_GT4(dev_priv)    (IS_SKYLAKE(dev_priv) && \
1456                                 INTEL_INFO(dev_priv)->gt == 4)
1457#define IS_KBL_GT2(dev_priv)    (IS_KABYLAKE(dev_priv) && \
1458                                 INTEL_INFO(dev_priv)->gt == 2)
1459#define IS_KBL_GT3(dev_priv)    (IS_KABYLAKE(dev_priv) && \
1460                                 INTEL_INFO(dev_priv)->gt == 3)
1461#define IS_CFL_ULT(dev_priv) \
1462        IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
1463#define IS_CFL_ULX(dev_priv) \
1464        IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
1465#define IS_CFL_GT2(dev_priv)    (IS_COFFEELAKE(dev_priv) && \
1466                                 INTEL_INFO(dev_priv)->gt == 2)
1467#define IS_CFL_GT3(dev_priv)    (IS_COFFEELAKE(dev_priv) && \
1468                                 INTEL_INFO(dev_priv)->gt == 3)
1469
1470#define IS_CML_ULT(dev_priv) \
1471        IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
1472#define IS_CML_ULX(dev_priv) \
1473        IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
1474#define IS_CML_GT2(dev_priv)    (IS_COMETLAKE(dev_priv) && \
1475                                 INTEL_INFO(dev_priv)->gt == 2)
1476
1477#define IS_CNL_WITH_PORT_F(dev_priv) \
1478        IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
1479#define IS_ICL_WITH_PORT_F(dev_priv) \
1480        IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
1481
1482#define IS_TGL_U(dev_priv) \
1483        IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULT)
1484
1485#define IS_TGL_Y(dev_priv) \
1486        IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX)
1487
1488#define SKL_REVID_A0            0x0
1489#define SKL_REVID_B0            0x1
1490#define SKL_REVID_C0            0x2
1491#define SKL_REVID_D0            0x3
1492#define SKL_REVID_E0            0x4
1493#define SKL_REVID_F0            0x5
1494#define SKL_REVID_G0            0x6
1495#define SKL_REVID_H0            0x7
1496
1497#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
1498
1499#define BXT_REVID_A0            0x0
1500#define BXT_REVID_A1            0x1
1501#define BXT_REVID_B0            0x3
1502#define BXT_REVID_B_LAST        0x8
1503#define BXT_REVID_C0            0x9
1504
1505#define IS_BXT_REVID(dev_priv, since, until) \
1506        (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
1507
1508enum {
1509        KBL_REVID_A0,
1510        KBL_REVID_B0,
1511        KBL_REVID_B1,
1512        KBL_REVID_C0,
1513        KBL_REVID_D0,
1514        KBL_REVID_D1,
1515        KBL_REVID_E0,
1516        KBL_REVID_F0,
1517        KBL_REVID_G0,
1518};
1519
1520struct i915_rev_steppings {
1521        u8 gt_stepping;
1522        u8 disp_stepping;
1523};
1524
1525/* Defined in intel_workarounds.c */
1526extern const struct i915_rev_steppings kbl_revids[];
1527
1528#define IS_KBL_GT_REVID(dev_priv, since, until) \
1529        (IS_KABYLAKE(dev_priv) && \
1530         kbl_revids[INTEL_REVID(dev_priv)].gt_stepping >= since && \
1531         kbl_revids[INTEL_REVID(dev_priv)].gt_stepping <= until)
1532#define IS_KBL_DISP_REVID(dev_priv, since, until) \
1533        (IS_KABYLAKE(dev_priv) && \
1534         kbl_revids[INTEL_REVID(dev_priv)].disp_stepping >= since && \
1535         kbl_revids[INTEL_REVID(dev_priv)].disp_stepping <= until)
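/*
 * Illustrative sketch, not part of the original header: the KBL PCI revision
 * is translated through kbl_revids[] so that GT and display steppings can be
 * checked independently.  kbl_example_needs_wa() is a hypothetical helper.
 */
static inline bool kbl_example_needs_wa(struct drm_i915_private *dev_priv)
{
        /* GT-side workaround up to stepping C0, display-side up to B0. */
        return IS_KBL_GT_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_C0) ||
               IS_KBL_DISP_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_B0);
}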
1536
1537#define GLK_REVID_A0            0x0
1538#define GLK_REVID_A1            0x1
1539#define GLK_REVID_A2            0x2
1540#define GLK_REVID_B0            0x3
1541
1542#define IS_GLK_REVID(dev_priv, since, until) \
1543        (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
1544
1545#define CNL_REVID_A0            0x0
1546#define CNL_REVID_B0            0x1
1547#define CNL_REVID_C0            0x2
1548
1549#define IS_CNL_REVID(p, since, until) \
1550        (IS_CANNONLAKE(p) && IS_REVID(p, since, until))
1551
1552#define ICL_REVID_A0            0x0
1553#define ICL_REVID_A2            0x1
1554#define ICL_REVID_B0            0x3
1555#define ICL_REVID_B2            0x4
1556#define ICL_REVID_C0            0x5
1557
1558#define IS_ICL_REVID(p, since, until) \
1559        (IS_ICELAKE(p) && IS_REVID(p, since, until))
1560
1561#define EHL_REVID_A0            0x0
1562
1563#define IS_EHL_REVID(p, since, until) \
1564        (IS_ELKHARTLAKE(p) && IS_REVID(p, since, until))
1565
1566enum {
1567        TGL_REVID_A0,
1568        TGL_REVID_B0,
1569        TGL_REVID_B1,
1570        TGL_REVID_C0,
1571        TGL_REVID_D0,
1572};
1573
1574extern const struct i915_rev_steppings tgl_uy_revids[];
1575extern const struct i915_rev_steppings tgl_revids[];
1576
1577static inline const struct i915_rev_steppings *
1578tgl_revids_get(struct drm_i915_private *dev_priv)
1579{
1580        if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv))
1581                return tgl_uy_revids;
1582        else
1583                return tgl_revids;
1584}
1585
1586#define IS_TGL_DISP_REVID(p, since, until) \
1587        (IS_TIGERLAKE(p) && \
1588         tgl_revids_get(p)[INTEL_REVID(p)].disp_stepping >= (since) && \
1589         tgl_revids_get(p)[INTEL_REVID(p)].disp_stepping <= (until))
1590
1591#define IS_TGL_UY_GT_REVID(p, since, until) \
1592        ((IS_TGL_U(p) || IS_TGL_Y(p)) && \
1593         tgl_uy_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
1594         tgl_uy_revids[INTEL_REVID(p)].gt_stepping <= (until))
1595
1596#define IS_TGL_GT_REVID(p, since, until) \
1597        (IS_TIGERLAKE(p) && \
1598         !(IS_TGL_U(p) || IS_TGL_Y(p)) && \
1599         tgl_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
1600         tgl_revids[INTEL_REVID(p)].gt_stepping <= (until))
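/*
 * Illustrative sketch, not part of the original header: Tiger Lake keeps
 * separate stepping tables for U/Y and the remaining SKUs, which
 * tgl_revids_get() hides from callers, so stepping-bound workarounds use the
 * macros above directly.  tgl_pre_c0_display_example() is a hypothetical
 * helper.
 */
static inline bool tgl_pre_c0_display_example(struct drm_i915_private *dev_priv)
{
        /* Display-side workaround limited to steppings before C0. */
        return IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1);
}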
1601
1602#define RKL_REVID_A0            0x0
1603#define RKL_REVID_B0            0x1
1604#define RKL_REVID_C0            0x4
1605
1606#define IS_RKL_REVID(p, since, until) \
1607        (IS_ROCKETLAKE(p) && IS_REVID(p, since, until))
1608
1609#define DG1_REVID_A0            0x0
1610#define DG1_REVID_B0            0x1
1611
1612#define IS_DG1_REVID(p, since, until) \
1613        (IS_DG1(p) && IS_REVID(p, since, until))
1614
1615#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
1616#define IS_GEN9_LP(dev_priv)    (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
1617#define IS_GEN9_BC(dev_priv)    (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
1618
1619#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
1620#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
1621
1622#define ENGINE_INSTANCES_MASK(gt, first, count) ({              \
1623        unsigned int first__ = (first);                                 \
1624        unsigned int count__ = (count);                                 \
1625        ((gt)->info.engine_mask &                                               \
1626         GENMASK(first__ + count__ - 1, first__)) >> first__;           \
1627})
1628#define VDBOX_MASK(gt) \
1629        ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
1630#define VEBOX_MASK(gt) \
1631        ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
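/*
 * Illustrative sketch, not part of the original header: VDBOX_MASK() yields a
 * bitmask with one bit per populated video decode engine, so the engine count
 * is simply the mask's weight.  example_num_vdbox() is a hypothetical helper.
 */
static inline unsigned int example_num_vdbox(struct intel_gt *gt)
{
        return hweight32(VDBOX_MASK(gt));
}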
1632
1633/*
1634 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
1635 * All later gens can run the final buffer from the ppgtt.
1636 */
1637#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
1638
1639#define HAS_LLC(dev_priv)       (INTEL_INFO(dev_priv)->has_llc)
1640#define HAS_SNOOP(dev_priv)     (INTEL_INFO(dev_priv)->has_snoop)
1641#define HAS_EDRAM(dev_priv)     ((dev_priv)->edram_size_mb)
1642#define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
1643#define HAS_WT(dev_priv)        ((IS_HASWELL(dev_priv) || \
1644                                 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
1645
1646#define HWS_NEEDS_PHYSICAL(dev_priv)    (INTEL_INFO(dev_priv)->hws_needs_physical)
1647
1648#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
1649                (INTEL_INFO(dev_priv)->has_logical_ring_contexts)
1650#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
1651                (INTEL_INFO(dev_priv)->has_logical_ring_elsq)
1652#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
1653                (INTEL_INFO(dev_priv)->has_logical_ring_preemption)
1654
1655#define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)
1656
1657#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
1658
1659#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
1660#define HAS_PPGTT(dev_priv) \
1661        (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
1662#define HAS_FULL_PPGTT(dev_priv) \
1663        (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
1664
1665#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
1666        GEM_BUG_ON((sizes) == 0); \
1667        ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
1668})
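/*
 * Illustrative sketch, not part of the original header: HAS_PAGE_SIZES()
 * succeeds only if every bit in the requested size mask is supported.  This
 * example relies on I915_GTT_PAGE_SIZE_64K from the GTT page-size definitions
 * elsewhere in the driver; example_supports_64k_pages() is a hypothetical
 * helper.
 */
static inline bool example_supports_64k_pages(struct drm_i915_private *dev_priv)
{
        return HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K);
}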
1669
1670#define HAS_OVERLAY(dev_priv)            (INTEL_INFO(dev_priv)->display.has_overlay)
1671#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
1672                (INTEL_INFO(dev_priv)->display.overlay_needs_physical)
1673
1674/* Early gen2 have a totally busted CS tlb and require pinned batches. */
1675#define HAS_BROKEN_CS_TLB(dev_priv)     (IS_I830(dev_priv) || IS_I845G(dev_priv))
1676
1677#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)   \
1678        (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
1679
1680/* WaRsDisableCoarsePowerGating:skl,cnl */
1681#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv)                    \
1682        (IS_CANNONLAKE(dev_priv) ||                                     \
1683         IS_SKL_GT3(dev_priv) ||                                        \
1684         IS_SKL_GT4(dev_priv))
1685
1686#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
1687#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
1688                                        IS_GEMINILAKE(dev_priv) || \
1689                                        IS_KABYLAKE(dev_priv))
1690
1691/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1692 * rows, which changed the alignment requirements and fence programming.
1693 */
1694#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
1695                                         !(IS_I915G(dev_priv) || \
1696                                         IS_I915GM(dev_priv)))
1697#define SUPPORTS_TV(dev_priv)           (INTEL_INFO(dev_priv)->display.supports_tv)
1698#define I915_HAS_HOTPLUG(dev_priv)      (INTEL_INFO(dev_priv)->display.has_hotplug)
1699
1700#define HAS_FW_BLC(dev_priv)    (INTEL_GEN(dev_priv) > 2)
1701#define HAS_FBC(dev_priv)       (INTEL_INFO(dev_priv)->display.has_fbc)
1702#define HAS_CUR_FBC(dev_priv)   (!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7)
1703
1704#define HAS_IPS(dev_priv)       (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
1705
1706#define HAS_DP_MST(dev_priv)    (INTEL_INFO(dev_priv)->display.has_dp_mst)
1707
1708#define HAS_DDI(dev_priv)                (INTEL_INFO(dev_priv)->display.has_ddi)
1709#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
1710#define HAS_PSR(dev_priv)                (INTEL_INFO(dev_priv)->display.has_psr)
1711#define HAS_PSR_HW_TRACKING(dev_priv) \
1712        (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
1713#define HAS_PSR2_SEL_FETCH(dev_priv)     (INTEL_GEN(dev_priv) >= 12)
1714#define HAS_TRANSCODER(dev_priv, trans)  ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
1715
1716#define HAS_RC6(dev_priv)                (INTEL_INFO(dev_priv)->has_rc6)
1717#define HAS_RC6p(dev_priv)               (INTEL_INFO(dev_priv)->has_rc6p)
1718#define HAS_RC6pp(dev_priv)              (false) /* HW was never validated */
1719
1720#define HAS_RPS(dev_priv)       (INTEL_INFO(dev_priv)->has_rps)
1721
1722#define HAS_CSR(dev_priv)       (INTEL_INFO(dev_priv)->display.has_csr)
1723
1724#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
1725#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
1726
1727#define HAS_IPC(dev_priv)                (INTEL_INFO(dev_priv)->display.has_ipc)
1728
1729#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
1730#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
1731
1732#define HAS_GT_UC(dev_priv)     (INTEL_INFO(dev_priv)->has_gt_uc)
1733
1734#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
1735
1736#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv)     (INTEL_INFO(dev_priv)->has_global_mocs)
1737
1739#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
1740
1741#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
1742
1743/* DPF == dynamic parity feature */
1744#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
1745#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
1746                                 2 : HAS_L3_DPF(dev_priv))
1747
1748#define GT_FREQUENCY_MULTIPLIER 50
1749#define GEN9_FREQ_SCALER 3
1750
1751#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask))
1752
1753#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
1754
1755/* Only valid when HAS_DISPLAY() is true */
1756#define INTEL_DISPLAY_ENABLED(dev_priv) \
1757        (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
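/*
 * Illustrative sketch, not part of the original header: callers are expected
 * to check HAS_DISPLAY() before consulting INTEL_DISPLAY_ENABLED(), since the
 * latter warns when no pipes exist.  example_can_use_display() is a
 * hypothetical helper; the short-circuit keeps the warning from firing.
 */
static inline bool example_can_use_display(struct drm_i915_private *dev_priv)
{
        return HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv);
}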
1758
1759static inline bool intel_vtd_active(void)
1760{
1761#ifdef CONFIG_INTEL_IOMMU
1762        if (intel_iommu_gfx_mapped)
1763                return true;
1764#endif
1765
1766        /* Running as a guest, we assume the host is enforcing VT-d */
1767        return !hypervisor_is_type(X86_HYPER_NATIVE);
1768}
1769
1770static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
1771{
1772        return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
1773}
1774
1775static inline bool
1776intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
1777{
1778        return IS_BROXTON(dev_priv) && intel_vtd_active();
1779}
1780
1781/* i915_drv.c */
1782extern const struct dev_pm_ops i915_pm_ops;
1783
1784int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
1785void i915_driver_remove(struct drm_i915_private *i915);
1786
1787int i915_resume_switcheroo(struct drm_i915_private *i915);
1788int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
1789
1790int i915_getparam_ioctl(struct drm_device *dev, void *data,
1791                        struct drm_file *file_priv);
1792
1793/* i915_gem.c */
1794int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
1795void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
1796void i915_gem_init_early(struct drm_i915_private *dev_priv);
1797void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
1798int i915_gem_freeze(struct drm_i915_private *dev_priv);
1799int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
1800
1801struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);
1802
1803static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
1804{
1805        /*
1806         * A single pass should suffice to release all the freed objects (along
1807         * most call paths), but be a little more paranoid in that freeing
1808         * the objects does take a small amount of time, during which the RCU
1809         * callbacks could have added new objects into the freed list, and
1810         * armed the work again.
1811         */
1812        while (atomic_read(&i915->mm.free_count)) {
1813                flush_work(&i915->mm.free_work);
1814                rcu_barrier();
1815        }
1816}
1817
1818static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
1819{
1820        /*
1821         * Similar to the objects above (see i915_gem_drain_freed_objects()), in
1822         * general we have workers that are armed by RCU and then rearm
1823         * themselves in their callbacks. To be paranoid, we need to
1824         * drain the workqueue a second time after waiting for the RCU
1825         * grace period so that we catch work queued via RCU from the first
1826         * pass. As neither drain_workqueue() nor flush_workqueue() report
1827         * a result, we assume that no more than 3 passes are required to
1828         * catch all _recursive_ RCU delayed work.
1829         */
1831        int pass = 3;
1832        do {
1833                flush_workqueue(i915->wq);
1834                rcu_barrier();
1835                i915_gem_drain_freed_objects(i915);
1836        } while (--pass);
1837        drain_workqueue(i915->wq);
1838}
1839
1840struct i915_vma * __must_check
1841i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
1842                            struct i915_gem_ww_ctx *ww,
1843                            const struct i915_ggtt_view *view,
1844                            u64 size, u64 alignment, u64 flags);
1845
1846static inline struct i915_vma * __must_check
1847i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
1848                         const struct i915_ggtt_view *view,
1849                         u64 size, u64 alignment, u64 flags)
1850{
1851        return i915_gem_object_ggtt_pin_ww(obj, NULL, view, size, alignment, flags);
1852}
1853
1854int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
1855                           unsigned long flags);
1856#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
1857#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
1858#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
1859
1860void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
1861
1862int i915_gem_dumb_create(struct drm_file *file_priv,
1863                         struct drm_device *dev,
1864                         struct drm_mode_create_dumb *args);
1865
1866int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
1867
1868static inline u32 i915_reset_count(struct i915_gpu_error *error)
1869{
1870        return atomic_read(&error->reset_count);
1871}
1872
1873static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
1874                                          const struct intel_engine_cs *engine)
1875{
1876        return atomic_read(&error->reset_engine_count[engine->uabi_class]);
1877}
1878
1879int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
1880void i915_gem_driver_register(struct drm_i915_private *i915);
1881void i915_gem_driver_unregister(struct drm_i915_private *i915);
1882void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
1883void i915_gem_driver_release(struct drm_i915_private *dev_priv);
1884void i915_gem_suspend(struct drm_i915_private *dev_priv);
1885void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
1886void i915_gem_resume(struct drm_i915_private *dev_priv);
1887
1888int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
1889
1890int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
1891                                    enum i915_cache_level cache_level);
1892
1893struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
1894                                struct dma_buf *dma_buf);
1895
1896struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);
1897
1898static inline struct i915_gem_context *
1899__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
1900{
1901        return xa_load(&file_priv->context_xa, id);
1902}
1903
1904static inline struct i915_gem_context *
1905i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
1906{
1907        struct i915_gem_context *ctx;
1908
1909        rcu_read_lock();
1910        ctx = __i915_gem_context_lookup_rcu(file_priv, id);
1911        if (ctx && !kref_get_unless_zero(&ctx->ref))
1912                ctx = NULL;
1913        rcu_read_unlock();
1914
1915        return ctx;
1916}
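/*
 * Illustrative sketch, not part of the original header: a typical caller of
 * i915_gem_context_lookup() drops the reference it obtained via
 * i915_gem_context_put(), which is declared elsewhere in the driver.
 * example_context_exists() is a hypothetical helper.
 */
static inline bool example_context_exists(struct drm_i915_file_private *file_priv,
                                          u32 id)
{
        struct i915_gem_context *ctx;

        ctx = i915_gem_context_lookup(file_priv, id);
        if (!ctx)
                return false;

        i915_gem_context_put(ctx);
        return true;
}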
1917
1918/* i915_gem_evict.c */
1919int __must_check i915_gem_evict_something(struct i915_address_space *vm,
1920                                          u64 min_size, u64 alignment,
1921                                          unsigned long color,
1922                                          u64 start, u64 end,
1923                                          unsigned flags);
1924int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
1925                                         struct drm_mm_node *node,
1926                                         unsigned int flags);
1927int i915_gem_evict_vm(struct i915_address_space *vm);
1928
1929/* i915_gem_internal.c */
1930struct drm_i915_gem_object *
1931i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
1932                                phys_addr_t size);
1933
1934/* i915_gem_tiling.c */
1935static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
1936{
1937        struct drm_i915_private *i915 = to_i915(obj->base.dev);
1938
1939        return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
1940                i915_gem_object_is_tiled(obj);
1941}
1942
1943u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
1944                        unsigned int tiling, unsigned int stride);
1945u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
1946                             unsigned int tiling, unsigned int stride);
1947
1948const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
1949
1950/* i915_cmd_parser.c */
1951int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
1952void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
1953void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
1954int intel_engine_cmd_parser(struct intel_engine_cs *engine,
1955                            struct i915_vma *batch,
1956                            unsigned long batch_offset,
1957                            unsigned long batch_length,
1958                            struct i915_vma *shadow,
1959                            bool trampoline);
1960#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
1961
1962/* intel_device_info.c */
1963static inline struct intel_device_info *
1964mkwrite_device_info(struct drm_i915_private *dev_priv)
1965{
1966        return (struct intel_device_info *)INTEL_INFO(dev_priv);
1967}
1968
1969int i915_reg_read_ioctl(struct drm_device *dev, void *data,
1970                        struct drm_file *file);
1971
1972#define __I915_REG_OP(op__, dev_priv__, ...) \
1973        intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
1974
1975#define I915_READ(reg__)         __I915_REG_OP(read, dev_priv, (reg__))
1976#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
1977
1978#define POSTING_READ(reg__)     __I915_REG_OP(posting_read, dev_priv, (reg__))
1979
1980/* These are untraced mmio-accessors that are only valid to be used inside
1981 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
1982 * controlled.
1983 *
1984 * Think twice, and think again, before using these.
1985 *
1986 * As an example, these accessors can possibly be used between:
1987 *
1988 * spin_lock_irq(&dev_priv->uncore.lock);
1989 * intel_uncore_forcewake_get__locked();
1990 *
1991 * and
1992 *
1993 * intel_uncore_forcewake_put__locked();
1994 * spin_unlock_irq(&dev_priv->uncore.lock);
1995 *
1997 * Note: some registers may not need forcewake held, so
1998 * intel_uncore_forcewake_{get,put} can be omitted, see
1999 * intel_uncore_forcewake_for_reg().
2000 *
2001 * Certain architectures will die if the same cacheline is concurrently accessed
2002 * by different clients (e.g. on Ivybridge). Access to registers should
2003 * therefore generally be serialised, by either the dev_priv->uncore.lock or
2004 * a more localised lock guarding all access to that bank of registers.
2005 */
2006#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
2007#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
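/*
 * Illustrative sketch, not part of the original header, of the locked
 * forcewake sequence described in the comment above.  GEN6_RC_STATE is used
 * purely as an example register; example_read_fw_locked() is a hypothetical
 * helper.
 */
static inline u32 example_read_fw_locked(struct drm_i915_private *dev_priv)
{
        u32 val;

        spin_lock_irq(&dev_priv->uncore.lock);
        intel_uncore_forcewake_get__locked(&dev_priv->uncore, FORCEWAKE_ALL);

        val = I915_READ_FW(GEN6_RC_STATE);

        intel_uncore_forcewake_put__locked(&dev_priv->uncore, FORCEWAKE_ALL);
        spin_unlock_irq(&dev_priv->uncore.lock);

        return val;
}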
2008
2009/* i915_mm.c */
2010int remap_io_mapping(struct vm_area_struct *vma,
2011                     unsigned long addr, unsigned long pfn, unsigned long size,
2012                     struct io_mapping *iomap);
2013int remap_io_sg(struct vm_area_struct *vma,
2014                unsigned long addr, unsigned long size,
2015                struct scatterlist *sgl, resource_size_t iobase);
2016
2017static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
2018{
2019        if (INTEL_GEN(i915) >= 10)
2020                return CNL_HWS_CSB_WRITE_INDEX;
2021        else
2022                return I915_HWS_CSB_WRITE_INDEX;
2023}
2024
2025static inline enum i915_map_type
2026i915_coherent_map_type(struct drm_i915_private *i915)
2027{
2028        return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
2029}
2030
2031static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val)
2032{
2033        return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz,
2034                                1000000000);
2035}
2036
2037static inline u64 i915_cs_timestamp_ticks_to_ns(struct drm_i915_private *i915, u64 val)
2038{
2039        return div_u64(val * 1000000000,
2040                       RUNTIME_INFO(i915)->cs_timestamp_frequency_hz);
2041}
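/*
 * Illustrative sketch, not part of the original header: the two conversion
 * helpers above are inverses of each other up to integer rounding.
 * example_cs_ticks_per_ms() is a hypothetical helper converting one
 * millisecond into command-streamer timestamp ticks.
 */
static inline u64 example_cs_ticks_per_ms(struct drm_i915_private *i915)
{
        return i915_cs_timestamp_ns_to_ticks(i915, 1000 * 1000);
}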
2042
2043#endif
2044