linux/drivers/gpu/drm/i915/i915_drv.h
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <asm/hypervisor.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/stackdepot.h>
#include <linux/xarray.h>

#include <drm/intel-gtt.h>
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>
#include <drm/ttm/ttm_device.h>

#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"

#include "display/intel_bios.h"
#include "display/intel_display.h"
#include "display/intel_display_power.h"
#include "display/intel_dmc.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsb.h"
#include "display/intel_frontbuffer.h"
#include "display/intel_global_state.h"
#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"

#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
#include "gem/i915_gem_lmem.h"

#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
#include "gt/intel_region_lmem.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"

#include "intel_device_info.h"
#include "intel_memory_region.h"
#include "intel_pch.h"
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
#include "i915_perf_types.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "gt/intel_timeline.h"
#include "i915_vma.h"
#include "i915_irq.h"


/* General customization:
 */

#define DRIVER_NAME             "i915"
#define DRIVER_DESC             "Intel Graphics"
#define DRIVER_DATE             "20201103"
#define DRIVER_TIMESTAMP        1604406085

struct drm_i915_gem_object;

enum hpd_pin {
        HPD_NONE = 0,
        HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
        HPD_CRT,
        HPD_SDVO_B,
        HPD_SDVO_C,
        HPD_PORT_A,
        HPD_PORT_B,
        HPD_PORT_C,
        HPD_PORT_D,
        HPD_PORT_E,
        HPD_PORT_TC1,
        HPD_PORT_TC2,
        HPD_PORT_TC3,
        HPD_PORT_TC4,
        HPD_PORT_TC5,
        HPD_PORT_TC6,

        HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
        for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
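
/*
 * Usage sketch for the iterator above: walk every pin, skipping HPD_NONE,
 * e.g. to clear the per-pin storm statistics (assumes a live dev_priv):
 *
 *        enum hpd_pin pin;
 *
 *        for_each_hpd_pin(pin)
 *                dev_priv->hotplug.stats[pin].count = 0;
 */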

/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50

struct i915_hotplug {
        struct delayed_work hotplug_work;

        const u32 *hpd, *pch_hpd;

        struct {
                unsigned long last_jiffies;
                int count;
                enum {
                        HPD_ENABLED = 0,
                        HPD_DISABLED = 1,
                        HPD_MARK_DISABLED = 2
                } state;
        } stats[HPD_NUM_PINS];
        u32 event_bits;
        u32 retry_bits;
        struct delayed_work reenable_work;

        u32 long_port_mask;
        u32 short_port_mask;
        struct work_struct dig_port_work;

        struct work_struct poll_init_work;
        bool poll_enabled;

        unsigned int hpd_storm_threshold;
        /* Whether or not to count short HPD IRQs in HPD storms */
        u8 hpd_short_storm_enabled;

        /*
         * If we get an HPD irq from DP and an HPD irq from non-DP, the
         * non-DP HPD could block the workqueue while acquiring the mode
         * config mutex that userspace may have taken, while userspace is
         * waiting on the DP workqueue to run, which is blocked behind
         * the non-DP one.
         */
        struct workqueue_struct *dp_wq;
};
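
/*
 * Storm accounting sketch, a simplified cut of the detection logic in
 * display/intel_hotplug.c (the 1 second window below is an assumption):
 * each IRQ bumps stats[pin].count within a window anchored at
 * last_jiffies; once the count crosses hpd_storm_threshold the pin is
 * marked HPD_MARK_DISABLED and polling takes over.
 *
 *        struct i915_hotplug *hpd = &dev_priv->hotplug;
 *        unsigned long end = hpd->stats[pin].last_jiffies +
 *                            msecs_to_jiffies(1000);
 *
 *        if (time_after(jiffies, end)) {
 *                hpd->stats[pin].last_jiffies = jiffies;
 *                hpd->stats[pin].count = 0;
 *        } else if (++hpd->stats[pin].count > hpd->hpd_storm_threshold) {
 *                hpd->stats[pin].state = HPD_MARK_DISABLED;
 *        }
 */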

#define I915_GEM_GPU_DOMAINS \
        (I915_GEM_DOMAIN_RENDER | \
         I915_GEM_DOMAIN_SAMPLER | \
         I915_GEM_DOMAIN_COMMAND | \
         I915_GEM_DOMAIN_INSTRUCTION | \
         I915_GEM_DOMAIN_VERTEX)

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
        struct drm_i915_private *dev_priv;

        union {
                struct drm_file *file;
                struct rcu_head rcu;
        };

        /** @proto_context_lock: Guards all struct i915_gem_proto_context
         * operations
         *
         * This not only guards @proto_context_xa, but is always held
         * whenever we manipulate any struct i915_gem_proto_context,
         * including finalizing it on first actual use of the GEM context.
         *
         * See i915_gem_proto_context.
         */
        struct mutex proto_context_lock;

        /** @proto_context_xa: xarray of struct i915_gem_proto_context
         *
         * Historically, the context uAPI allowed for two methods of
         * setting context parameters: SET_CONTEXT_PARAM and
         * CONTEXT_CREATE_EXT_SETPARAM.  The former is allowed to be called
         * at any time while the latter happens as part of
         * GEM_CONTEXT_CREATE.  Everything settable via one was settable
         * via the other.  While some params are fairly simple and setting
         * them on a live context is harmless, such as the context priority,
         * others are far trickier, such as the VM or the set of engines.
         * In order to swap out the VM, for instance, we have to delay
         * until all current in-flight work is complete, swap in the new
         * VM, and then continue.  This leads to a plethora of potential
         * race conditions we'd really rather avoid.
         *
         * We have since disallowed setting these more complex parameters
         * on active contexts.  This works by delaying the creation of the
         * actual context until after the client is done configuring it
         * with SET_CONTEXT_PARAM.  From the perspective of the client, it
         * has the same u32 context ID the whole time.  From the
         * perspective of i915, however, it's a struct i915_gem_proto_context
         * right up until the point where we attempt to do something which
         * the proto-context can't handle.  Then the struct i915_gem_context
         * gets created.
         *
         * This is accomplished via a little xarray dance.  When
         * GEM_CONTEXT_CREATE is called, we create a struct
         * i915_gem_proto_context, reserve a slot in @context_xa but leave
         * it NULL, and place the proto-context in the corresponding slot
         * in @proto_context_xa.  Then, in i915_gem_context_lookup(), we
         * first check @context_xa.  If it's there, we return the struct
         * i915_gem_context and we're done.  If it's not, we look in
         * @proto_context_xa and, if we find it there, we create the actual
         * context and kill the proto-context.
         *
         * In order for this dance to work properly, everything which ever
         * touches a struct i915_gem_proto_context is guarded by
         * @proto_context_lock, including context creation.  Yes, this
         * means context creation now takes a giant global lock but it
         * can't really be helped and that should never be on any driver's
         * fast-path anyway.
         */
        struct xarray proto_context_xa;

        /** @context_xa: xarray of fully created i915_gem_context
         *
         * Write access to this xarray is guarded by @proto_context_lock.
         * Otherwise, writers may race with finalize_create_context_locked().
         *
         * See @proto_context_xa.
         */
        struct xarray context_xa;
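
        /*
         * Lookup sketch of the xarray dance above (simplified; the real
         * i915_gem_context_lookup() also takes a reference under RCU):
         *
         *        ctx = xa_load(&file_priv->context_xa, id);
         *        if (ctx)
         *                return ctx;
         *
         *        mutex_lock(&file_priv->proto_context_lock);
         *        pc = xa_load(&file_priv->proto_context_xa, id);
         *        if (pc)
         *                ctx = finalize_create_context_locked(file_priv,
         *                                                     pc, id);
         *        mutex_unlock(&file_priv->proto_context_lock);
         */
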
        struct xarray vm_xa;

        unsigned int bsd_engine;

/*
 * Every context ban increments the per-client ban score. Hangs in short
 * succession also increment the ban score. If the ban threshold is
 * reached, the client is considered banned and submitting more work
 * will fail. This is a stop-gap measure to limit a badly behaving
 * client's access to the GPU. Note that unbannable contexts never
 * increment the client ban score.
 */
#define I915_CLIENT_SCORE_HANG_FAST     1
#define   I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
#define I915_CLIENT_SCORE_CONTEXT_BAN   3
#define I915_CLIENT_SCORE_BANNED        9
        /** ban_score: Accumulated score of all ctx bans and fast hangs. */
        atomic_t ban_score;
        unsigned long hang_timestamp;
};
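
/*
 * Worked example of the scoring: three context bans accumulate
 * 3 * I915_CLIENT_SCORE_CONTEXT_BAN = 9 = I915_CLIENT_SCORE_BANNED
 * points, so the client is banned; hangs spaced less than
 * I915_CLIENT_FAST_HANG_JIFFIES apart add I915_CLIENT_SCORE_HANG_FAST
 * (1) point each and reach the same threshold more slowly.
 */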

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR            1
#define DRIVER_MINOR            6
#define DRIVER_PATCHLEVEL       0

struct intel_overlay;
struct intel_overlay_error_state;

struct sdvo_device_mapping {
        u8 initialized;
        u8 dvo_port;
        u8 slave_addr;
        u8 dvo_wiring;
        u8 i2c_pin;
        u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
struct intel_cdclk_config;
struct intel_cdclk_state;
struct intel_cdclk_vals;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
        void (*get_cdclk)(struct drm_i915_private *dev_priv,
                          struct intel_cdclk_config *cdclk_config);
        void (*set_cdclk)(struct drm_i915_private *dev_priv,
                          const struct intel_cdclk_config *cdclk_config,
                          enum pipe pipe);
        int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
        int (*get_fifo_size)(struct drm_i915_private *dev_priv,
                             enum i9xx_plane_id i9xx_plane);
        int (*compute_pipe_wm)(struct intel_atomic_state *state,
                               struct intel_crtc *crtc);
        int (*compute_intermediate_wm)(struct intel_atomic_state *state,
                                       struct intel_crtc *crtc);
        void (*initial_watermarks)(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc);
        void (*atomic_update_watermarks)(struct intel_atomic_state *state,
                                         struct intel_crtc *crtc);
        void (*optimize_watermarks)(struct intel_atomic_state *state,
                                    struct intel_crtc *crtc);
        int (*compute_global_watermarks)(struct intel_atomic_state *state);
        void (*update_wm)(struct intel_crtc *crtc);
        int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
        u8 (*calc_voltage_level)(int cdclk);
        /* Returns the active state of the crtc, and if the crtc is active,
         * fills out the pipe-config with the hw state. */
        bool (*get_pipe_config)(struct intel_crtc *,
                                struct intel_crtc_state *);
        void (*get_initial_plane_config)(struct intel_crtc *,
                                         struct intel_initial_plane_config *);
        int (*crtc_compute_clock)(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state);
        void (*crtc_enable)(struct intel_atomic_state *state,
                            struct intel_crtc *crtc);
        void (*crtc_disable)(struct intel_atomic_state *state,
                             struct intel_crtc *crtc);
        void (*commit_modeset_enables)(struct intel_atomic_state *state);
        void (*commit_modeset_disables)(struct intel_atomic_state *state);
        void (*audio_codec_enable)(struct intel_encoder *encoder,
                                   const struct intel_crtc_state *crtc_state,
                                   const struct drm_connector_state *conn_state);
        void (*audio_codec_disable)(struct intel_encoder *encoder,
                                    const struct intel_crtc_state *old_crtc_state,
                                    const struct drm_connector_state *old_conn_state);
        void (*fdi_link_train)(struct intel_crtc *crtc,
                               const struct intel_crtc_state *crtc_state);
        void (*init_clock_gating)(struct drm_i915_private *dev_priv);
        void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
        /* clock updates for mode set */
        /* cursor updates */
        /* render clock increase/decrease */
        /* display clock increase/decrease */
        /* pll clock increase/decrease */

        int (*color_check)(struct intel_crtc_state *crtc_state);
        /*
         * Program double buffered color management registers during
         * vblank evasion. The registers should then latch during the
         * next vblank start, alongside any other double buffered registers
         * involved with the same commit.
         */
        void (*color_commit)(const struct intel_crtc_state *crtc_state);
        /*
         * Load LUTs (and other single buffered color management
         * registers). Will (hopefully) be called during the vblank
         * following the latching of any double buffered registers
         * involved with the same commit.
         */
        void (*load_luts)(const struct intel_crtc_state *crtc_state);
        void (*read_luts)(struct intel_crtc_state *crtc_state);
};
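
/*
 * These hooks are filled in per-platform at init time; callers dispatch
 * through the vtable instead of branching on platform. A minimal sketch,
 * assuming a platform that provides the legacy update_wm hook:
 *
 *        if (dev_priv->display.update_wm)
 *                dev_priv->display.update_wm(crtc);
 */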


#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */

struct intel_fbc {
        /* This is always the inner lock when overlapping with struct_mutex and
         * it's the outer lock when overlapping with stolen_lock. */
        struct mutex lock;
        unsigned int possible_framebuffer_bits;
        unsigned int busy_bits;
        struct intel_crtc *crtc;

        struct drm_mm_node compressed_fb;
        struct drm_mm_node compressed_llb;

        u8 limit;

        bool false_color;

        bool active;
        bool activated;
        bool flip_pending;

        bool underrun_detected;
        struct work_struct underrun_work;

        /*
         * Due to the atomic rules we can't access some structures without the
         * appropriate locking, so we cache information here in order to avoid
         * these problems.
         */
        struct intel_fbc_state_cache {
                struct {
                        unsigned int mode_flags;
                        u32 hsw_bdw_pixel_rate;
                } crtc;

                struct {
                        unsigned int rotation;
                        int src_w;
                        int src_h;
                        bool visible;
                        /*
                         * Display surface base address adjustment for
                         * pageflips. Note that on gen4+ this only adjusts up
                         * to a tile, offsets within a tile are handled in
                         * the hw itself (with the TILEOFF register).
                         */
                        int adjusted_x;
                        int adjusted_y;

                        u16 pixel_blend_mode;
                } plane;

                struct {
                        const struct drm_format_info *format;
                        unsigned int stride;
                        u64 modifier;
                } fb;

                unsigned int fence_y_offset;
                u16 gen9_wa_cfb_stride;
                u16 interval;
                s8 fence_id;
                bool psr2_active;
        } state_cache;

        /*
         * This structure contains everything that's relevant to program the
         * hardware registers. When we want to figure out if we need to disable
         * and re-enable FBC for a new configuration we just check if there's
         * something different in the struct. The genx_fbc_activate functions
         * are supposed to read from it in order to program the registers.
         */
        struct intel_fbc_reg_params {
                struct {
                        enum pipe pipe;
                        enum i9xx_plane_id i9xx_plane;
                } crtc;

                struct {
                        const struct drm_format_info *format;
                        unsigned int stride;
                        u64 modifier;
                } fb;

                int cfb_size;
                unsigned int fence_y_offset;
                u16 gen9_wa_cfb_stride;
                u16 interval;
                s8 fence_id;
                bool plane_visible;
        } params;

        const char *no_fbc_reason;
};

/*
 * HIGH_RR is the highest eDP panel refresh rate read from EDID;
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for the same resolution.
 */
enum drrs_refresh_rate_type {
        DRRS_HIGH_RR,
        DRRS_LOW_RR,
        DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
        DRRS_NOT_SUPPORTED = 0,
        STATIC_DRRS_SUPPORT = 1,
        SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
        struct mutex mutex;
        struct delayed_work work;
        struct intel_dp *dp;
        unsigned busy_frontbuffer_bits;
        enum drrs_refresh_rate_type refresh_rate_type;
        enum drrs_support_type type;
};

#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
        struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
        u32 force_bit;
        u32 reg0;
        i915_reg_t gpio_reg;
        struct i2c_algo_bit_data bit_algo;
        struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
        u32 saveDSPARB;
        u32 saveSWF0[16];
        u32 saveSWF1[16];
        u32 saveSWF3[3];
        u16 saveGCDGMBUS;
};

struct vlv_s0ix_state;

#define MAX_L3_SLICES 2
struct intel_l3_parity {
        u32 *remap_info[MAX_L3_SLICES];
        struct work_struct error_work;
        int which_slice;
};

struct i915_gem_mm {
        /*
         * Shortcut for the stolen region. This points to either
         * INTEL_REGION_STOLEN_SMEM for integrated platforms, or
         * INTEL_REGION_STOLEN_LMEM for discrete, or NULL if the device doesn't
         * support stolen.
         */
        struct intel_memory_region *stolen_region;
        /** Memory allocator for GTT stolen memory */
        struct drm_mm stolen;
        /** Protects the usage of the GTT stolen memory allocator. This is
         * always the inner lock when overlapping with struct_mutex. */
        struct mutex stolen_lock;

        /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
        spinlock_t obj_lock;

        /**
         * List of objects which are purgeable.
         */
        struct list_head purge_list;

        /**
         * List of objects which have allocated pages and are shrinkable.
         */
        struct list_head shrink_list;

        /**
         * List of objects which are pending destruction.
         */
        struct llist_head free_list;
        struct work_struct free_work;
        /**
         * Count of objects pending destruction. Used to skip needlessly
         * waiting on an RCU barrier if no objects are waiting to be freed.
         */
        atomic_t free_count;

        /**
         * tmpfs instance used for shmem backed objects
         */
        struct vfsmount *gemfs;

        struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];

        struct notifier_block oom_notifier;
        struct notifier_block vmap_notifier;
        struct shrinker shrinker;

#ifdef CONFIG_MMU_NOTIFIER
        /**
         * notifier_lock for mmu notifiers, memory may not be allocated
         * while holding this lock.
         */
        rwlock_t notifier_lock;
#endif

        /* shrinker accounting, also useful for userland debugging */
        u64 shrink_memory;
        u32 shrink_count;
};

#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */

unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
                                         u64 context);

static inline unsigned long
i915_fence_timeout(const struct drm_i915_private *i915)
{
        return i915_fence_context_timeout(i915, U64_MAX);
}

/* Amount of SAGV/QGV points, BSpec precisely defines this */
#define I915_NUM_QGV_POINTS 8

#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))

/* Amount of PSF GV points, BSpec precisely defines this */
#define I915_NUM_PSF_GV_POINTS 3

struct ddi_vbt_port_info {
        /* Non-NULL if port present. */
        struct intel_bios_encoder_data *devdata;

        int max_tmds_clock;

        /* This is an index in the HDMI/DVI DDI buffer translation table. */
        u8 hdmi_level_shift;
        u8 hdmi_level_shift_set:1;

        u8 alternate_aux_channel;
        u8 alternate_ddc_pin;

        int dp_max_link_rate;           /* 0 for not limited by VBT */
};

enum psr_lines_to_wait {
        PSR_0_LINES_TO_WAIT = 0,
        PSR_1_LINE_TO_WAIT,
        PSR_4_LINES_TO_WAIT,
        PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
        /* bdb version */
        u16 version;

        struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
        struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

        /* Feature bits */
        unsigned int int_tv_support:1;
        unsigned int lvds_dither:1;
        unsigned int int_crt_support:1;
        unsigned int lvds_use_ssc:1;
        unsigned int int_lvds_support:1;
        unsigned int display_clock_mode:1;
        unsigned int fdi_rx_polarity_inverted:1;
        unsigned int panel_type:4;
        int lvds_ssc_freq;
        unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
        enum drm_panel_orientation orientation;

        enum drrs_support_type drrs_type;

        struct {
                int rate;
                int lanes;
                int preemphasis;
                int vswing;
                bool low_vswing;
                bool initialized;
                int bpp;
                struct edp_power_seq pps;
                bool hobl;
        } edp;

        struct {
                bool enable;
                bool full_link;
                bool require_aux_wakeup;
                int idle_frames;
                enum psr_lines_to_wait lines_to_wait;
                int tp1_wakeup_time_us;
                int tp2_tp3_wakeup_time_us;
                int psr2_tp2_tp3_wakeup_time_us;
        } psr;

        struct {
                u16 pwm_freq_hz;
                bool present;
                bool active_low_pwm;
                u8 min_brightness;      /* min_brightness/255 of max */
                u8 controller;          /* brightness controller number */
                enum intel_backlight_type type;
        } backlight;

        /* MIPI DSI */
        struct {
                u16 panel_id;
                struct mipi_config *config;
                struct mipi_pps_data *pps;
                u16 bl_ports;
                u16 cabc_ports;
                u8 seq_version;
                u32 size;
                u8 *data;
                const u8 *sequence[MIPI_SEQ_MAX];
                u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
                enum drm_panel_orientation orientation;
        } dsi;

        int crt_ddc_pin;

        struct list_head display_devices;

        struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
        struct sdvo_device_mapping sdvo_mappings[2];
};

enum intel_ddb_partitioning {
        INTEL_DDB_PART_1_2,
        INTEL_DDB_PART_5_6, /* IVB+ */
};

struct ilk_wm_values {
        u32 wm_pipe[3];
        u32 wm_lp[3];
        u32 wm_lp_spr[3];
        bool enable_fbc_wm;
        enum intel_ddb_partitioning partitioning;
};

struct g4x_pipe_wm {
        u16 plane[I915_MAX_PLANES];
        u16 fbc;
};

struct g4x_sr_wm {
        u16 plane;
        u16 cursor;
        u16 fbc;
};

struct vlv_wm_ddl_values {
        u8 plane[I915_MAX_PLANES];
};

struct vlv_wm_values {
        struct g4x_pipe_wm pipe[3];
        struct g4x_sr_wm sr;
        struct vlv_wm_ddl_values ddl[3];
        u8 level;
        bool cxsr;
};

struct g4x_wm_values {
        struct g4x_pipe_wm pipe[2];
        struct g4x_sr_wm sr;
        struct g4x_sr_wm hpll;
        bool cxsr;
        bool hpll_en;
        bool fbc_en;
};

struct skl_ddb_entry {
        u16 start, end; /* in number of blocks, 'end' is exclusive */
};

static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
        return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
                                       const struct skl_ddb_entry *e2)
{
        if (e1->start == e2->start && e1->end == e2->end)
                return true;

        return false;
}
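
/*
 * With 'end' exclusive, two entries overlap exactly when each one starts
 * before the other ends. A sketch, equivalent to the overlap check the
 * driver performs elsewhere on these entries:
 */
static inline bool skl_ddb_entry_overlaps(const struct skl_ddb_entry *e1,
                                          const struct skl_ddb_entry *e2)
{
        return e1->start < e2->end && e2->start < e1->end;
}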

struct i915_frontbuffer_tracking {
        spinlock_t lock;

        /*
         * Tracking bits for delayed frontbuffer flushing due to gpu activity
         * or scheduled flips.
         */
        unsigned busy_bits;
        unsigned flip_bits;
};

struct i915_virtual_gpu {
        struct mutex lock; /* serialises sending of g2v_notify command pkts */
        bool active;
        u32 caps;
};

struct intel_cdclk_config {
        unsigned int cdclk, vco, ref, bypass;
        u8 voltage_level;
};

struct i915_selftest_stash {
        atomic_t counter;
        struct ida mock_region_instances;
};

struct drm_i915_private {
        struct drm_device drm;

        /* FIXME: Device release actions should all be moved to drmm_ */
        bool do_release;

        /* i915 device parameters */
        struct i915_params params;

        const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
        struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
        struct intel_driver_caps caps;

        /**
         * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
         * end of stolen which we can optionally use to create GEM objects
         * backed by stolen memory. Note that stolen_usable_size tells us
         * exactly how much of this we are actually allowed to use, given that
         * some portion of it is in fact reserved for use by hardware functions.
         */
        struct resource dsm;
        /**
         * Reserved portion of Data Stolen Memory
         */
        struct resource dsm_reserved;

        /*
         * Stolen memory is segmented in hardware with different portions
         * offlimits to certain functions.
         *
         * The drm_mm is initialised to the total accessible range, as found
         * from the PCI config. On Broadwell+, this is further restricted to
         * avoid the first page! The upper end of stolen memory is reserved for
         * hardware functions and similarly removed from the accessible range.
         */
        resource_size_t stolen_usable_size;     /* Total size minus reserved ranges */

        struct intel_uncore uncore;
        struct intel_uncore_mmio_debug mmio_debug;

        struct i915_virtual_gpu vgpu;

        struct intel_gvt *gvt;

        struct intel_wopcm wopcm;

        struct intel_dmc dmc;

        struct intel_gmbus gmbus[GMBUS_NUM_PINS];

        /** gmbus_mutex protects against concurrent usage of the single hw gmbus
         * controller on different i2c buses. */
        struct mutex gmbus_mutex;

        /**
         * Base address of where the gmbus and gpio blocks are located (either
         * on PCH or on SoC for platforms without PCH).
         */
        u32 gpio_mmio_base;

        u32 hsw_psr_mmio_adjust;

        /* MMIO base address for MIPI regs */
        u32 mipi_mmio_base;

        u32 pps_mmio_base;

        wait_queue_head_t gmbus_wait_queue;

        struct pci_dev *bridge_dev;

        struct rb_root uabi_engines;

        struct resource mch_res;

        /* protects the irq masks */
        spinlock_t irq_lock;

        bool display_irqs_enabled;

        /* Sideband mailbox protection */
        struct mutex sb_lock;
        struct pm_qos_request sb_qos;

        /** Cached value of IMR to avoid reads in updating the bitfield */
        union {
                u32 irq_mask;
                u32 de_irq_mask[I915_MAX_PIPES];
        };
        u32 pipestat_irq_mask[I915_MAX_PIPES];

        struct i915_hotplug hotplug;
        struct intel_fbc fbc;
        struct i915_drrs drrs;
        struct intel_opregion opregion;
        struct intel_vbt_data vbt;

        bool preserve_bios_swizzle;

        /* overlay */
        struct intel_overlay *overlay;

        /* backlight registers and fields in struct intel_panel */
        struct mutex backlight_lock;

        /* protects panel power sequencer state */
        struct mutex pps_mutex;

        unsigned int fsb_freq, mem_freq, is_ddr3;
        unsigned int skl_preferred_vco_freq;
        unsigned int max_cdclk_freq;

        unsigned int max_dotclk_freq;
        unsigned int hpll_freq;
        unsigned int fdi_pll_freq;
        unsigned int czclk_freq;

        struct {
                /* The current hardware cdclk configuration */
                struct intel_cdclk_config hw;

                /* cdclk, divider, and ratio table from bspec */
                const struct intel_cdclk_vals *table;

                struct intel_global_obj obj;
        } cdclk;

        struct {
                /* The current hardware dbuf configuration */
                u8 enabled_slices;

                struct intel_global_obj obj;
        } dbuf;

        /**
         * wq - Driver workqueue for GEM.
         *
         * NOTE: Work items scheduled here are not allowed to grab any modeset
         * locks, for otherwise the flushing done in the pageflip code will
         * result in deadlocks.
         */
        struct workqueue_struct *wq;

        /* ordered wq for modesets */
        struct workqueue_struct *modeset_wq;
        /* unbound hipri wq for page flips/plane updates */
        struct workqueue_struct *flip_wq;

        /* Display functions */
        struct drm_i915_display_funcs display;

        /* PCH chipset type */
        enum intel_pch pch_type;
        unsigned short pch_id;

        unsigned long quirks;

        struct drm_atomic_state *modeset_restore_state;
        struct drm_modeset_acquire_ctx reset_ctx;

        struct i915_ggtt ggtt; /* VM representing the global address space */

        struct i915_gem_mm mm;

        /* Kernel Modesetting */

        struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
        struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];

        /**
         * dpll and cdclk state is protected by connection_mutex;
         * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
         * Must be global rather than per dpll, because on some platforms plls
         * share registers.
         */
        struct {
                struct mutex lock;

                int num_shared_dpll;
                struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
                const struct intel_dpll_mgr *mgr;

                struct {
                        int nssc;
                        int ssc;
                } ref_clks;
        } dpll;

        struct list_head global_obj_list;

        /*
         * For reading active_pipes, holding any crtc lock is sufficient;
         * for writing, all of them must be held.
         */
        u8 active_pipes;

        struct i915_wa_list gt_wa_list;

        struct i915_frontbuffer_tracking fb_tracking;

        struct intel_atomic_helper {
                struct llist_head free_list;
                struct work_struct free_work;
        } atomic_helper;

        bool mchbar_need_disable;

        struct intel_l3_parity l3_parity;

        /*
         * HTI (aka HDPORT) state read during initial hw readout.  Most
         * platforms don't have HTI, so this will just stay 0.  Those that do
         * will use this later to figure out which PLLs and PHYs are unavailable
         * for driver usage.
         */
        u32 hti_state;

        /*
         * edram size in MB.
         * Cannot be determined by PCIID. You must always read a register.
         */
        u32 edram_size_mb;

        struct i915_power_domains power_domains;

        struct i915_gpu_error gpu_error;

        struct drm_i915_gem_object *vlv_pctx;

        /* the fbdev registered on this device */
        struct intel_fbdev *fbdev;
        struct work_struct fbdev_suspend_work;

        struct drm_property *broadcast_rgb_property;
        struct drm_property *force_audio_property;

        /* hda/i915 audio component */
        struct i915_audio_component *audio_component;
        bool audio_component_registered;
        /**
         * av_mutex - mutex for audio/video sync
         */
        struct mutex av_mutex;
        int audio_power_refcount;
        u32 audio_freq_cntrl;

        u32 fdi_rx_config;

        /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
        u32 chv_phy_control;
        /*
         * Shadows for CHV DPLL_MD regs to keep the state
         * checker somewhat working in the presence of hardware
         * crappiness (can't read out DPLL_MD for pipes B & C).
         */
        u32 chv_dpll_md[I915_MAX_PIPES];
        u32 bxt_phy_grc;

        u32 suspend_count;
        bool power_domains_suspended;
        struct i915_suspend_saved_registers regfile;
        struct vlv_s0ix_state *vlv_s0ix_state;

        enum {
                I915_SAGV_UNKNOWN = 0,
                I915_SAGV_DISABLED,
                I915_SAGV_ENABLED,
                I915_SAGV_NOT_CONTROLLED
        } sagv_status;

        u32 sagv_block_time_us;

        struct {
                /*
                 * Raw watermark latency values:
                 * in 0.1us units for WM0,
                 * in 0.5us units for WM1+.
                 */
                /* primary */
                u16 pri_latency[5];
                /* sprite */
                u16 spr_latency[5];
                /* cursor */
                u16 cur_latency[5];
                /*
                 * Raw watermark memory latency values
                 * for SKL for all 8 levels
                 * in 1us units.
                 */
                u16 skl_latency[8];

                /* current hardware state */
                union {
                        struct ilk_wm_values hw;
                        struct vlv_wm_values vlv;
                        struct g4x_wm_values g4x;
                };

                u8 max_level;

                /*
                 * Should be held around atomic WM register writing; also
                 * protects intel_crtc->wm.active and
                 * crtc_state->wm.need_postvbl_update.
                 */
                struct mutex wm_mutex;
        } wm;

        struct dram_info {
                bool wm_lv_0_adjust_needed;
                u8 num_channels;
                bool symmetric_memory;
                enum intel_dram_type {
                        INTEL_DRAM_UNKNOWN,
                        INTEL_DRAM_DDR3,
                        INTEL_DRAM_DDR4,
                        INTEL_DRAM_LPDDR3,
                        INTEL_DRAM_LPDDR4,
                        INTEL_DRAM_DDR5,
                        INTEL_DRAM_LPDDR5,
                } type;
                u8 num_qgv_points;
                u8 num_psf_gv_points;
        } dram_info;

        struct intel_bw_info {
                /* for each QGV point */
                unsigned int deratedbw[I915_NUM_QGV_POINTS];
                /* for each PSF GV point */
                unsigned int psf_bw[I915_NUM_PSF_GV_POINTS];
                u8 num_qgv_points;
                u8 num_psf_gv_points;
                u8 num_planes;
        } max_bw[6];

        struct intel_global_obj bw_obj;

        struct intel_runtime_pm runtime_pm;

        struct i915_perf perf;

        /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
        struct intel_gt gt;

        struct {
                struct i915_gem_contexts {
                        spinlock_t lock; /* locks list */
                        struct list_head list;
                } contexts;

                /*
                 * We replace the local file with a global mapping because the
                 * backing storage for the mmap is on the device and not
                 * on the struct file, and we do not want to prolong the
                 * lifetime of the local fd. To minimise the number of
                 * anonymous inodes we create, we use a global singleton to
                 * share the global mapping.
                 */
                struct file *mmap_singleton;
        } gem;

        u8 framestart_delay;

        /* Window2 specifies time required to program DSB (Window2) in number of scan lines */
        u8 window2_delay;

        u8 pch_ssc_use;

        /* For i915gm/i945gm vblank irq workaround */
        u8 vblank_enabled;

        bool irq_enabled;

        /* perform PHY state sanity checks? */
        bool chv_phy_assert[2];

        bool ipc_enabled;

        /* Used to save the pipe-to-encoder mapping for audio */
        struct intel_encoder *av_enc_map[I915_MAX_PIPES];

        /* necessary resource sharing with HDMI LPE audio driver. */
        struct {
                struct platform_device *platdev;
                int     irq;
        } lpe_audio;

        struct i915_pmu pmu;

        struct i915_hdcp_comp_master *hdcp_master;
        bool hdcp_comp_added;

        /* Mutex to protect the above hdcp component related values. */
        struct mutex hdcp_comp_mutex;

        /* The TTM device structure. */
        struct ttm_device bdev;

        I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)

        /*
         * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
         * will be rejected. Instead look for a better place.
         */
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
        return container_of(dev, struct drm_i915_private, drm);
}

static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
        return dev_get_drvdata(kdev);
}

static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
        return pci_get_drvdata(pdev);
}
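
/*
 * Usage sketch for the accessors above, e.g. from a drm callback that is
 * handed the struct drm_device:
 *
 *        struct drm_i915_private *i915 = to_i915(dev);
 *
 * or from a pm callback that only has the struct device:
 *
 *        struct drm_i915_private *i915 = kdev_to_i915(kdev);
 */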

/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
        for ((id__) = 0; \
             (id__) < I915_NUM_ENGINES; \
             (id__)++) \
                for_each_if ((engine__) = (dev_priv__)->engine[(id__)])

/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
        for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
             (tmp__) ? \
             ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
             0;)

#define rb_to_uabi_engine(rb) \
        rb_entry_safe(rb, struct intel_engine_cs, uabi_node)

#define for_each_uabi_engine(engine__, i915__) \
        for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
             (engine__); \
             (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))

#define for_each_uabi_class_engine(engine__, class__, i915__) \
        for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
             (engine__) && (engine__)->uabi_class == (class__), \
             (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
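
/*
 * Iteration sketch (assumes a live i915 pointer; engine->name is the
 * engine's uabi-visible name):
 *
 *        struct intel_engine_cs *engine;
 *        enum intel_engine_id id;
 *        unsigned int count = 0;
 *
 *        for_each_engine(engine, i915, id)
 *                count++;
 *
 *        for_each_uabi_engine(engine, i915)
 *                drm_dbg(&i915->drm, "%s\n", engine->name);
 */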

#define I915_GTT_OFFSET_NONE ((u32)-1)

/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
        BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
        BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
        BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
})
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
        BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
        GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
                INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
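
/*
 * Worked example: with 8 bits per pipe, plane 0 on PIPE_B (pipe 1) maps
 * to BIT(0 + 8 * 1) = 0x100, INTEL_FRONTBUFFER_OVERLAY(PIPE_A) is the
 * top bit of the first byte, BIT(7), and
 * INTEL_FRONTBUFFER_ALL_MASK(PIPE_A) is GENMASK(7, 0) = 0xff.
 */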

#define INTEL_INFO(dev_priv)    (&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv)  (&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv)   (&(dev_priv)->caps)

#define INTEL_DEVID(dev_priv)   (RUNTIME_INFO(dev_priv)->device_id)

#define IP_VER(ver, rel)                ((ver) << 8 | (rel))

#define GRAPHICS_VER(i915)              (INTEL_INFO(i915)->graphics_ver)
#define GRAPHICS_VER_FULL(i915)         IP_VER(INTEL_INFO(i915)->graphics_ver, \
                                               INTEL_INFO(i915)->graphics_rel)
#define IS_GRAPHICS_VER(i915, from, until) \
        (GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))

#define MEDIA_VER(i915)                 (INTEL_INFO(i915)->media_ver)
#define MEDIA_VER_FULL(i915)            IP_VER(INTEL_INFO(i915)->media_ver, \
                                               INTEL_INFO(i915)->media_rel)
#define IS_MEDIA_VER(i915, from, until) \
        (MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))

#define DISPLAY_VER(i915)       (INTEL_INFO(i915)->display.ver)
#define IS_DISPLAY_VER(i915, from, until) \
        (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
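
/*
 * Worked example: a device reporting graphics_ver 12 and graphics_rel 50
 * yields GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) == 0xc32, so a check
 * like "GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)" gates 12.50-and-later
 * paths while plain GRAPHICS_VER(i915) == 12 still holds for gen checks.
 */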

#define INTEL_REVID(dev_priv)   (to_pci_dev((dev_priv)->drm.dev)->revision)

#define HAS_DSB(dev_priv)       (INTEL_INFO(dev_priv)->display.has_dsb)

#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
#define INTEL_GT_STEP(__i915) (RUNTIME_INFO(__i915)->step.gt_step)

#define IS_DISPLAY_STEP(__i915, since, until) \
        (drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
         INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))

#define IS_GT_STEP(__i915, since, until) \
        (drm_WARN_ON(&(__i915)->drm, INTEL_GT_STEP(__i915) == STEP_NONE), \
         INTEL_GT_STEP(__i915) >= (since) && INTEL_GT_STEP(__i915) < (until))

static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
                      enum intel_platform p)
{
        const unsigned int pbits =
                BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

        /* Expand the platform_mask array if this fails. */
        BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
                     pbits * ARRAY_SIZE(info->platform_mask));

        return p / pbits;
}

static __always_inline unsigned int
__platform_mask_bit(const struct intel_runtime_info *info,
                    enum intel_platform p)
{
        const unsigned int pbits =
                BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

        return p % pbits + INTEL_SUBPLATFORM_BITS;
}

static inline u32
intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
        const unsigned int pi = __platform_mask_index(info, p);

        return info->platform_mask[pi] & INTEL_SUBPLATFORM_MASK;
}

static __always_inline bool
IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
{
        const struct intel_runtime_info *info = RUNTIME_INFO(i915);
        const unsigned int pi = __platform_mask_index(info, p);
        const unsigned int pb = __platform_mask_bit(info, p);

        BUILD_BUG_ON(!__builtin_constant_p(p));

        return info->platform_mask[pi] & BIT(pb);
}

static __always_inline bool
IS_SUBPLATFORM(const struct drm_i915_private *i915,
               enum intel_platform p, unsigned int s)
{
        const struct intel_runtime_info *info = RUNTIME_INFO(i915);
        const unsigned int pi = __platform_mask_index(info, p);
        const unsigned int pb = __platform_mask_bit(info, p);
        const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
        const u32 mask = info->platform_mask[pi];

        BUILD_BUG_ON(!__builtin_constant_p(p));
        BUILD_BUG_ON(!__builtin_constant_p(s));
        BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);

        /* Shift and test on the MSB position so sign flag can be used. */
        return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}
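
/*
 * Worked example of the mask layout: the low INTEL_SUBPLATFORM_BITS of
 * each platform_mask word describe the device's own subplatform, and
 * each remaining bit holds one platform flag. IS_SUBPLATFORM() shifts
 * the platform bit pb and the subplatform bit s up to the MSB and ANDs
 * them, so the result is non-zero only if both bits are set, without
 * needing a branch.
 */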
1400
1401#define IS_MOBILE(dev_priv)     (INTEL_INFO(dev_priv)->is_mobile)
1402#define IS_DGFX(dev_priv)   (INTEL_INFO(dev_priv)->is_dgfx)
1403
1404#define IS_I830(dev_priv)       IS_PLATFORM(dev_priv, INTEL_I830)
1405#define IS_I845G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I845G)
1406#define IS_I85X(dev_priv)       IS_PLATFORM(dev_priv, INTEL_I85X)
1407#define IS_I865G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I865G)
1408#define IS_I915G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I915G)
1409#define IS_I915GM(dev_priv)     IS_PLATFORM(dev_priv, INTEL_I915GM)
1410#define IS_I945G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I945G)
1411#define IS_I945GM(dev_priv)     IS_PLATFORM(dev_priv, INTEL_I945GM)
1412#define IS_I965G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I965G)
1413#define IS_I965GM(dev_priv)     IS_PLATFORM(dev_priv, INTEL_I965GM)
1414#define IS_G45(dev_priv)        IS_PLATFORM(dev_priv, INTEL_G45)
1415#define IS_GM45(dev_priv)       IS_PLATFORM(dev_priv, INTEL_GM45)
1416#define IS_G4X(dev_priv)        (IS_G45(dev_priv) || IS_GM45(dev_priv))
1417#define IS_PINEVIEW(dev_priv)   IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
1418#define IS_G33(dev_priv)        IS_PLATFORM(dev_priv, INTEL_G33)
1419#define IS_IRONLAKE(dev_priv)   IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
1420#define IS_IRONLAKE_M(dev_priv) \
        (IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
#define IS_SANDYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SANDYBRIDGE)
#define IS_IVYBRIDGE(dev_priv)  IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv)    (IS_IVYBRIDGE(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 1)
#define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv)    IS_PLATFORM(dev_priv, INTEL_HASWELL)
#define IS_BROADWELL(dev_priv)  IS_PLATFORM(dev_priv, INTEL_BROADWELL)
#define IS_SKYLAKE(dev_priv)    IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
#define IS_BROXTON(dev_priv)    IS_PLATFORM(dev_priv, INTEL_BROXTON)
#define IS_KABYLAKE(dev_priv)   IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_COMETLAKE(dev_priv)  IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
#define IS_CANNONLAKE(dev_priv) 0
#define IS_ICELAKE(dev_priv)    IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_JSL_EHL(dev_priv)    (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
                                 IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
#define IS_TIGERLAKE(dev_priv)  IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
#define IS_DG1(dev_priv)        IS_PLATFORM(dev_priv, INTEL_DG1)
#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S)
#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_P)
#define IS_XEHPSDV(dev_priv)    IS_PLATFORM(dev_priv, INTEL_XEHPSDV)
#define IS_DG2(dev_priv)        IS_PLATFORM(dev_priv, INTEL_DG2)
#define IS_DG2_G10(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
                                    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
#define IS_BDW_ULX(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_BDW_GT3(dev_priv)    (IS_BROADWELL(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_ULT(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
#define IS_HSW_GT3(dev_priv)    (IS_HASWELL(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_GT1(dev_priv)    (IS_HASWELL(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_ULT(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_SKL_ULX(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_KBL_ULT(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KBL_ULX(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_GT2(dev_priv)    (IS_SKYLAKE(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv)    (IS_SKYLAKE(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 3)
#define IS_SKL_GT4(dev_priv)    (IS_SKYLAKE(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 4)
#define IS_KBL_GT2(dev_priv)    (IS_KABYLAKE(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv)    (IS_KABYLAKE(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 3)
#define IS_CFL_ULT(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CFL_ULX(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CFL_GT2(dev_priv)    (IS_COFFEELAKE(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv)    (IS_COFFEELAKE(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 3)

#define IS_CML_ULT(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CML_ULX(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_CML_GT2(dev_priv)    (IS_COMETLAKE(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 2)

#define IS_ICL_WITH_PORT_F(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)

#define IS_TGL_U(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULT)

#define IS_TGL_Y(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX)

#define IS_SKL_GT_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GT_STEP(p, since, until))

#define IS_KBL_GT_STEP(dev_priv, since, until) \
        (IS_KABYLAKE(dev_priv) && IS_GT_STEP(dev_priv, since, until))
#define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \
        (IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until))
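
/*
 * Illustrative sketch, not part of the driver: the stepping-bounded macros
 * above are meant to gate workarounds. Assuming the STEP_* values from
 * intel_step.h and an exclusive "until" bound, a KBL-only fix limited to
 * pre-C0 GT steppings would be gated like:
 *
 *      if (IS_KBL_GT_STEP(i915, STEP_A0, STEP_C0))
 *              apply_kbl_gt_workaround(i915);
 *
 * where apply_kbl_gt_workaround() is a hypothetical helper.
 */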

#define IS_JSL_EHL_GT_STEP(p, since, until) \
        (IS_JSL_EHL(p) && IS_GT_STEP(p, since, until))
#define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \
        (IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until))

#define IS_TGL_DISPLAY_STEP(__i915, since, until) \
        (IS_TIGERLAKE(__i915) && \
         IS_DISPLAY_STEP(__i915, since, until))

#define IS_TGL_UY_GT_STEP(__i915, since, until) \
        ((IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \
         IS_GT_STEP(__i915, since, until))

#define IS_TGL_GT_STEP(__i915, since, until) \
        (IS_TIGERLAKE(__i915) && !(IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \
         IS_GT_STEP(__i915, since, until))

#define IS_RKL_DISPLAY_STEP(p, since, until) \
        (IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))

#define IS_DG1_GT_STEP(p, since, until) \
        (IS_DG1(p) && IS_GT_STEP(p, since, until))
#define IS_DG1_DISPLAY_STEP(p, since, until) \
        (IS_DG1(p) && IS_DISPLAY_STEP(p, since, until))

#define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
        (IS_ALDERLAKE_S(__i915) && \
         IS_DISPLAY_STEP(__i915, since, until))

#define IS_ADLS_GT_STEP(__i915, since, until) \
        (IS_ALDERLAKE_S(__i915) && \
         IS_GT_STEP(__i915, since, until))

#define IS_ADLP_DISPLAY_STEP(__i915, since, until) \
        (IS_ALDERLAKE_P(__i915) && \
         IS_DISPLAY_STEP(__i915, since, until))

#define IS_ADLP_GT_STEP(__i915, since, until) \
        (IS_ALDERLAKE_P(__i915) && \
         IS_GT_STEP(__i915, since, until))

#define IS_XEHPSDV_GT_STEP(__i915, since, until) \
        (IS_XEHPSDV(__i915) && IS_GT_STEP(__i915, since, until))

/*
 * DG2 hardware steppings are a bit unusual.  The hardware design was forked
 * to create two variants (G10 and G11) which have distinct workaround sets.
 * The G11 fork of the DG2 design resets the GT stepping back to "A0" for its
 * first iteration, even though it's more similar to a G10 B0 stepping in
 * terms of functionality and workarounds.  However, the display stepping
 * does not reset in the same manner: a specific stepping like "B0" has a
 * consistent meaning regardless of whether it belongs to a G10 or G11 DG2.
 *
 * TLDR:  All GT workarounds and stepping-specific logic must be applied in
 * relation to a specific subplatform (G10 or G11), whereas display workarounds
 * and stepping-specific logic will be applied with a general DG2-wide stepping
 * number.
 */
#define IS_DG2_GT_STEP(__i915, variant, since, until) \
        (IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \
         IS_GT_STEP(__i915, since, until))

#define IS_DG2_DISP_STEP(__i915, since, until) \
        (IS_DG2(__i915) && \
         IS_DISPLAY_STEP(__i915, since, until))
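
/*
 * Illustrative sketch, not part of the driver, following the comment above:
 * GT workarounds name the DG2 variant explicitly, while display workarounds
 * use the DG2-wide stepping. The STEP_* bounds are assumed to come from
 * intel_step.h and the helpers are hypothetical.
 *
 *      if (IS_DG2_GT_STEP(i915, G10, STEP_A0, STEP_B0))
 *              apply_g10_early_gt_workaround(i915);
 *
 *      if (IS_DG2_DISP_STEP(i915, STEP_B0, STEP_FOREVER))
 *              apply_dg2_display_workaround(i915);
 */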

#define IS_LP(dev_priv)         (INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv)    (GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv)    (GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv))

#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)

#define ENGINE_INSTANCES_MASK(gt, first, count) ({                      \
        unsigned int first__ = (first);                                 \
        unsigned int count__ = (count);                                 \
        ((gt)->info.engine_mask &                                       \
         GENMASK(first__ + count__ - 1, first__)) >> first__;           \
})
#define VDBOX_MASK(gt) \
        ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(gt) \
        ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
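
/*
 * Illustrative sketch, not part of the driver: ENGINE_INSTANCES_MASK()
 * shifts the result down so that bit 0 corresponds to the first instance
 * of the class. Counting the video engines is then a population count:
 *
 *      unsigned int num_vdbox = hweight32(VDBOX_MASK(gt));
 *      bool has_first_vdbox = VDBOX_MASK(gt) & BIT(0);
 */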

/*
 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
 * All later gens can run the final buffer from the ppgtt.
 */
#define CMDPARSER_USES_GGTT(dev_priv) (GRAPHICS_VER(dev_priv) == 7)

#define HAS_LLC(dev_priv)       (INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv)     (INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv)     ((dev_priv)->edram_size_mb)
#define HAS_SECURE_BATCHES(dev_priv) (GRAPHICS_VER(dev_priv) < 6)
#define HAS_WT(dev_priv)        HAS_EDRAM(dev_priv)

#define HWS_NEEDS_PHYSICAL(dev_priv)    (INTEL_INFO(dev_priv)->hws_needs_physical)

#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
                (INTEL_INFO(dev_priv)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
                (INTEL_INFO(dev_priv)->has_logical_ring_elsq)

#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)

#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
        (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
        (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)

#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
        GEM_BUG_ON((sizes) == 0); \
        ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
})
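
/*
 * Illustrative sketch, not part of the driver: callers pass a mask of
 * I915_GTT_PAGE_SIZE_* values and HAS_PAGE_SIZES() reports whether every
 * requested size is supported:
 *
 *      if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_2M))
 *              use_huge_gtt_pages(obj);
 *
 * where use_huge_gtt_pages() is a hypothetical helper.
 */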

#define HAS_OVERLAY(dev_priv)            (INTEL_INFO(dev_priv)->display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
                (INTEL_INFO(dev_priv)->display.overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv)     (IS_I830(dev_priv) || IS_I845G(dev_priv))

#define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)   \
        (IS_BROADWELL(dev_priv) || GRAPHICS_VER(dev_priv) == 9)

/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv)                    \
        (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))

#define HAS_GMBUS_IRQ(dev_priv) (GRAPHICS_VER(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (GRAPHICS_VER(dev_priv) >= 11 || \
                                        IS_GEMINILAKE(dev_priv) || \
                                        IS_KABYLAKE(dev_priv))

/*
 * With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev_priv) (GRAPHICS_VER(dev_priv) != 2 && \
                                         !(IS_I915G(dev_priv) || IS_I915GM(dev_priv)))
#define SUPPORTS_TV(dev_priv)           (INTEL_INFO(dev_priv)->display.supports_tv)
#define I915_HAS_HOTPLUG(dev_priv)      (INTEL_INFO(dev_priv)->display.has_hotplug)

#define HAS_FW_BLC(dev_priv)    (GRAPHICS_VER(dev_priv) > 2)
#define HAS_FBC(dev_priv)       (INTEL_INFO(dev_priv)->display.has_fbc)
#define HAS_CUR_FBC(dev_priv)   (!HAS_GMCH(dev_priv) && GRAPHICS_VER(dev_priv) >= 7)

#define HAS_IPS(dev_priv)       (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))

#define HAS_DP_MST(dev_priv)    (INTEL_INFO(dev_priv)->display.has_dp_mst)

#define HAS_CDCLK_CRAWL(dev_priv)        (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
#define HAS_DDI(dev_priv)                (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg)
#define HAS_PSR(dev_priv)                (INTEL_INFO(dev_priv)->display.has_psr)
#define HAS_PSR_HW_TRACKING(dev_priv) \
        (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(dev_priv)     (GRAPHICS_VER(dev_priv) >= 12)
#define HAS_TRANSCODER(dev_priv, trans)  ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)

#define HAS_RC6(dev_priv)                (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv)               (INTEL_INFO(dev_priv)->has_rc6p)
#define HAS_RC6pp(dev_priv)              (false) /* HW was never validated */

#define HAS_RPS(dev_priv)       (INTEL_INFO(dev_priv)->has_rps)

#define HAS_DMC(dev_priv)       (INTEL_INFO(dev_priv)->display.has_dmc)

#define HAS_MSO(i915)           (GRAPHICS_VER(i915) >= 12)

#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)

#define HAS_MSLICES(dev_priv) \
        (INTEL_INFO(dev_priv)->has_mslices)

#define HAS_IPC(dev_priv)                (INTEL_INFO(dev_priv)->display.has_ipc)

#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)

#define HAS_GT_UC(dev_priv)     (INTEL_INFO(dev_priv)->has_gt_uc)

#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)

#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv)     (INTEL_INFO(dev_priv)->has_global_mocs)

#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)

#define HAS_LSPCON(dev_priv) (IS_GRAPHICS_VER(dev_priv, 9, 10))

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
                                 2 : HAS_L3_DPF(dev_priv))

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3

#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask))

#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)

#define HAS_VRR(i915)   (GRAPHICS_VER(i915) >= 12)

/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
        (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
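
/*
 * Illustrative sketch, not part of the driver: the comma expression in
 * INTEL_DISPLAY_ENABLED() first WARNs if it is evaluated on a device
 * without display, then yields the modparam check, so call sites look like:
 *
 *      if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915))
 *              bring_up_display(i915);
 *
 * where bring_up_display() is a hypothetical helper.
 */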

static inline bool run_as_guest(void)
{
        return !hypervisor_is_type(X86_HYPER_NATIVE);
}

#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
                                              IS_ALDERLAKE_S(dev_priv))

static inline bool intel_vtd_active(void)
{
#ifdef CONFIG_INTEL_IOMMU
        if (intel_iommu_gfx_mapped)
                return true;
#endif

        /* Running as a guest, we assume the host is enforcing VT-d */
        return run_as_guest();
}

static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
        return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active();
}

static inline bool
intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
        return IS_BROXTON(i915) && intel_vtd_active();
}

static inline bool
intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
{
        return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}
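
/*
 * Illustrative sketch, not part of the driver: the predicates above gate
 * VT-d workarounds at their call sites, e.g. deciding whether a GGTT
 * update must be serialized against concurrent GPU access:
 *
 *      if (intel_vm_no_concurrent_access_wa(i915))
 *              serialize_ggtt_update(i915);
 *
 * where serialize_ggtt_update() is a hypothetical helper.
 */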

/* i915_drv.c */
extern const struct dev_pm_ops i915_pm_ops;

int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
void i915_driver_remove(struct drm_i915_private *i915);
void i915_driver_shutdown(struct drm_i915_private *i915);

int i915_resume_switcheroo(struct drm_i915_private *i915);
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);

int i915_getparam_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);

/* i915_gem.c */
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);

static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
        /*
         * A single pass should suffice to release all the freed objects
         * (along most call paths), but be a little more paranoid: freeing
         * the objects does take a little time, during which the rcu
         * callbacks could have added new objects into the freed list, and
         * armed the work again.
         */
        while (atomic_read(&i915->mm.free_count)) {
                flush_work(&i915->mm.free_work);
                rcu_barrier();
        }
}

static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
        /*
         * Similar to the objects above (see i915_gem_drain_freed_objects()),
         * in general we have workers that are armed by RCU and then rearm
         * themselves in their callbacks. To be paranoid, we need to
         * drain the workqueue a second time after waiting for the RCU
         * grace period so that we catch work queued via RCU from the first
         * pass. As neither drain_workqueue() nor flush_workqueue() report
         * a result, we assume that no more than 3 passes are required to
         * catch all _recursive_ RCU delayed work.
         */
        int pass = 3;
        do {
                flush_workqueue(i915->wq);
                rcu_barrier();
                i915_gem_drain_freed_objects(i915);
        } while (--pass);
        drain_workqueue(i915->wq);
}

struct i915_vma * __must_check
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
                            struct i915_gem_ww_ctx *ww,
                            const struct i915_ggtt_view *view,
                            u64 size, u64 alignment, u64 flags);

static inline struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         const struct i915_ggtt_view *view,
                         u64 size, u64 alignment, u64 flags)
{
        return i915_gem_object_ggtt_pin_ww(obj, NULL, view, size, alignment, flags);
}

int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
                           unsigned long flags);
#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);

int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);

int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
        return atomic_read(&error->reset_count);
}

static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
                                          const struct intel_engine_cs *engine)
{
        return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}

int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
void i915_gem_driver_register(struct drm_i915_private *i915);
void i915_gem_driver_unregister(struct drm_i915_private *i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);

static inline struct i915_address_space *
i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
        struct i915_address_space *vm;

        rcu_read_lock();
        vm = xa_load(&file_priv->vm_xa, id);
        if (vm && !kref_get_unless_zero(&vm->ref))
                vm = NULL;
        rcu_read_unlock();

        return vm;
}
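
/*
 * Illustrative sketch, not part of the driver: on success the lookup above
 * returns a new reference that the caller must drop with i915_vm_put().
 * A hypothetical ioctl handler would do:
 *
 *      vm = i915_gem_vm_lookup(file_priv, args->vm_id);
 *      if (!vm)
 *              return -ENOENT;
 *      ... use vm ...
 *      i915_vm_put(vm);
 *
 * where args->vm_id stands in for the user-supplied id.
 */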

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
                                          u64 min_size, u64 alignment,
                                          unsigned long color,
                                          u64 start, u64 end,
                                          unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
                                         struct drm_mm_node *node,
                                         unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);

/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
                                phys_addr_t size);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);

        return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
                i915_gem_object_is_tiled(obj);
}

u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
                        unsigned int tiling, unsigned int stride);
u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
                             unsigned int tiling, unsigned int stride);

const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                            struct i915_vma *batch,
                            unsigned long batch_offset,
                            unsigned long batch_length,
                            struct i915_vma *shadow,
                            bool trampoline);
#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
        return (struct intel_device_info *)INTEL_INFO(dev_priv);
}

int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);

/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
                     unsigned long addr, unsigned long pfn, unsigned long size,
                     struct io_mapping *iomap);
int remap_io_sg(struct vm_area_struct *vma,
                unsigned long addr, unsigned long size,
                struct scatterlist *sgl, resource_size_t iobase);

static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
        if (GRAPHICS_VER(i915) >= 11)
                return ICL_HWS_CSB_WRITE_INDEX;
        else
                return I915_HWS_CSB_WRITE_INDEX;
}

static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915,
                       struct drm_i915_gem_object *obj, bool always_coherent)
{
        if (i915_gem_object_is_lmem(obj))
                return I915_MAP_WC;
        if (HAS_LLC(i915) || always_coherent)
                return I915_MAP_WB;
        else
                return I915_MAP_WC;
}
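
/*
 * Illustrative sketch, not part of the driver: a typical use is picking
 * the mapping type when pinning a kernel mapping of an object:
 *
 *      vaddr = i915_gem_object_pin_map_unlocked(obj,
 *                      i915_coherent_map_type(i915, obj, true));
 */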

#endif