/* linux/tools/include/uapi/drm/i915_drm.h */
   1/*
   2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining a
   6 * copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sub license, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * The above copyright notice and this permission notice (including the
  14 * next paragraph) shall be included in all copies or substantial portions
  15 * of the Software.
  16 *
  17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24 *
  25 */
  26
  27#ifndef _UAPI_I915_DRM_H_
  28#define _UAPI_I915_DRM_H_
  29
  30#include "drm.h"
  31
  32#if defined(__cplusplus)
  33extern "C" {
  34#endif
  35
  36/* Please note that modifications to all structs defined here are
  37 * subject to backwards-compatibility constraints.
  38 */
  39
  40/**
  41 * DOC: uevents generated by i915 on it's device node
  42 *
  43 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
  44 *      event from the gpu l3 cache. Additional information supplied is ROW,
  45 *      BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
  46 *      track of these events and if a specific cache-line seems to have a
  47 *      persistent error remap it with the l3 remapping tool supplied in
  48 *      intel-gpu-tools.  The value supplied with the event is always 1.
  49 *
  50 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
  51 *      hangcheck. The error detection event is a good indicator of when things
  52 *      began to go badly. The value supplied with the event is a 1 upon error
  53 *      detection, and a 0 upon reset completion, signifying no more error
  54 *      exists. NOTE: Disabling hangcheck or reset via module parameter will
  55 *      cause the related events to not be seen.
  56 *
  57 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
  58 *      the GPU. The value supplied with the event is always 1. NOTE: Disable
  59 *      reset via module parameter will cause this event to not be seen.
  60 */
  61#define I915_L3_PARITY_UEVENT           "L3_PARITY_ERROR"
  62#define I915_ERROR_UEVENT               "ERROR"
  63#define I915_RESET_UEVENT               "RESET"
  64
  65/*
  66 * MOCS indexes used for GPU surfaces, defining the cacheability of the
  67 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
  68 */
  69enum i915_mocs_table_index {
  70        /*
  71         * Not cached anywhere, coherency between CPU and GPU accesses is
  72         * guaranteed.
  73         */
  74        I915_MOCS_UNCACHED,
  75        /*
  76         * Cacheability and coherency controlled by the kernel automatically
  77         * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
  78         * usage of the surface (used for display scanout or not).
  79         */
  80        I915_MOCS_PTE,
  81        /*
  82         * Cached in all GPU caches available on the platform.
  83         * Coherency between CPU and GPU accesses to the surface is not
  84         * guaranteed without extra synchronization.
  85         */
  86        I915_MOCS_CACHED,
  87};
  88
/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14	/* log2(16k) minimum region size */
  94
/* Argument for the legacy DRM_I915_INIT ioctl: selects a DMA
 * init/cleanup/resume operation and describes the pre-GEM ring buffer and
 * static framebuffer layout.  Layout is userspace ABI — do not modify
 * (see the backwards-compatibility note at the top of this file).
 */
typedef struct _drm_i915_init {
	/* which operation to perform */
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;	/* ring buffer placement */
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;	/* static buffer offsets */
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;			/* presumably screen width/height in pixels */
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;		/* presumably bytes per pixel — verify against users */
	unsigned int chipset;
} drm_i915_init_t;
 118
/* Legacy shared-memory area (SAREA) layout shared between the X driver and
 * the kernel.  Field order and sizes are userspace ABI — do not modify.
 */
typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;	/* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;		/* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;
 183
/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes (bitmask stored in drm_i915_sarea.perf_boxes)
 */
#define I915_BOX_RING_EMPTY    0x1
#define I915_BOX_FLIP          0x2
#define I915_BOX_WAIT          0x4
#define I915_BOX_TEXTURE_LOAD  0x8
#define I915_BOX_LOST_CONTEXT  0x10
 201
 202/*
 203 * i915 specific ioctls.
 204 *
 205 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
 206 * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
 207 * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
 208 */
 209#define DRM_I915_INIT           0x00
 210#define DRM_I915_FLUSH          0x01
 211#define DRM_I915_FLIP           0x02
 212#define DRM_I915_BATCHBUFFER    0x03
 213#define DRM_I915_IRQ_EMIT       0x04
 214#define DRM_I915_IRQ_WAIT       0x05
 215#define DRM_I915_GETPARAM       0x06
 216#define DRM_I915_SETPARAM       0x07
 217#define DRM_I915_ALLOC          0x08
 218#define DRM_I915_FREE           0x09
 219#define DRM_I915_INIT_HEAP      0x0a
 220#define DRM_I915_CMDBUFFER      0x0b
 221#define DRM_I915_DESTROY_HEAP   0x0c
 222#define DRM_I915_SET_VBLANK_PIPE        0x0d
 223#define DRM_I915_GET_VBLANK_PIPE        0x0e
 224#define DRM_I915_VBLANK_SWAP    0x0f
 225#define DRM_I915_HWS_ADDR       0x11
 226#define DRM_I915_GEM_INIT       0x13
 227#define DRM_I915_GEM_EXECBUFFER 0x14
 228#define DRM_I915_GEM_PIN        0x15
 229#define DRM_I915_GEM_UNPIN      0x16
 230#define DRM_I915_GEM_BUSY       0x17
 231#define DRM_I915_GEM_THROTTLE   0x18
 232#define DRM_I915_GEM_ENTERVT    0x19
 233#define DRM_I915_GEM_LEAVEVT    0x1a
 234#define DRM_I915_GEM_CREATE     0x1b
 235#define DRM_I915_GEM_PREAD      0x1c
 236#define DRM_I915_GEM_PWRITE     0x1d
 237#define DRM_I915_GEM_MMAP       0x1e
 238#define DRM_I915_GEM_SET_DOMAIN 0x1f
 239#define DRM_I915_GEM_SW_FINISH  0x20
 240#define DRM_I915_GEM_SET_TILING 0x21
 241#define DRM_I915_GEM_GET_TILING 0x22
 242#define DRM_I915_GEM_GET_APERTURE 0x23
 243#define DRM_I915_GEM_MMAP_GTT   0x24
 244#define DRM_I915_GET_PIPE_FROM_CRTC_ID  0x25
 245#define DRM_I915_GEM_MADVISE    0x26
 246#define DRM_I915_OVERLAY_PUT_IMAGE      0x27
 247#define DRM_I915_OVERLAY_ATTRS  0x28
 248#define DRM_I915_GEM_EXECBUFFER2        0x29
 249#define DRM_I915_GEM_EXECBUFFER2_WR     DRM_I915_GEM_EXECBUFFER2
 250#define DRM_I915_GET_SPRITE_COLORKEY    0x2a
 251#define DRM_I915_SET_SPRITE_COLORKEY    0x2b
 252#define DRM_I915_GEM_WAIT       0x2c
 253#define DRM_I915_GEM_CONTEXT_CREATE     0x2d
 254#define DRM_I915_GEM_CONTEXT_DESTROY    0x2e
 255#define DRM_I915_GEM_SET_CACHING        0x2f
 256#define DRM_I915_GEM_GET_CACHING        0x30
 257#define DRM_I915_REG_READ               0x31
 258#define DRM_I915_GET_RESET_STATS        0x32
 259#define DRM_I915_GEM_USERPTR            0x33
 260#define DRM_I915_GEM_CONTEXT_GETPARAM   0x34
 261#define DRM_I915_GEM_CONTEXT_SETPARAM   0x35
 262#define DRM_I915_PERF_OPEN              0x36
 263
 264#define DRM_IOCTL_I915_INIT             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 265#define DRM_IOCTL_I915_FLUSH            DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
 266#define DRM_IOCTL_I915_FLIP             DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
 267#define DRM_IOCTL_I915_BATCHBUFFER      DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
 268#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
 269#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
 270#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
 271#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
 272#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
 273#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
 274#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
 275#define DRM_IOCTL_I915_CMDBUFFER        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
 276#define DRM_IOCTL_I915_DESTROY_HEAP     DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
 277#define DRM_IOCTL_I915_SET_VBLANK_PIPE  DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 278#define DRM_IOCTL_I915_GET_VBLANK_PIPE  DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 279#define DRM_IOCTL_I915_VBLANK_SWAP      DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
 280#define DRM_IOCTL_I915_HWS_ADDR         DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
 281#define DRM_IOCTL_I915_GEM_INIT         DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
 282#define DRM_IOCTL_I915_GEM_EXECBUFFER   DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
 283#define DRM_IOCTL_I915_GEM_EXECBUFFER2  DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
 284#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR       DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
 285#define DRM_IOCTL_I915_GEM_PIN          DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
 286#define DRM_IOCTL_I915_GEM_UNPIN        DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
 287#define DRM_IOCTL_I915_GEM_BUSY         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
 288#define DRM_IOCTL_I915_GEM_SET_CACHING          DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
 289#define DRM_IOCTL_I915_GEM_GET_CACHING          DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
 290#define DRM_IOCTL_I915_GEM_THROTTLE     DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
 291#define DRM_IOCTL_I915_GEM_ENTERVT      DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
 292#define DRM_IOCTL_I915_GEM_LEAVEVT      DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
 293#define DRM_IOCTL_I915_GEM_CREATE       DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
 294#define DRM_IOCTL_I915_GEM_PREAD        DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
 295#define DRM_IOCTL_I915_GEM_PWRITE       DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
 296#define DRM_IOCTL_I915_GEM_MMAP         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
 297#define DRM_IOCTL_I915_GEM_MMAP_GTT     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
 298#define DRM_IOCTL_I915_GEM_SET_DOMAIN   DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
 299#define DRM_IOCTL_I915_GEM_SW_FINISH    DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
 300#define DRM_IOCTL_I915_GEM_SET_TILING   DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
 301#define DRM_IOCTL_I915_GEM_GET_TILING   DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
 302#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
 303#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
 304#define DRM_IOCTL_I915_GEM_MADVISE      DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
 305#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE        DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
 306#define DRM_IOCTL_I915_OVERLAY_ATTRS    DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
 307#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
 308#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
 309#define DRM_IOCTL_I915_GEM_WAIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
 310#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE       DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
 311#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY      DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
 312#define DRM_IOCTL_I915_REG_READ                 DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
 313#define DRM_IOCTL_I915_GET_RESET_STATS          DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
 314#define DRM_IOCTL_I915_GEM_USERPTR                      DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
 315#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM     DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
 316#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM     DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
 317#define DRM_IOCTL_I915_PERF_OPEN        DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
 318
/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char __user *buf;	/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
	int __user *irq_seq;	/* out: sequence number of the emitted irq */
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;		/* in: sequence number to wait for */
} drm_i915_irq_wait_t;
 352
/* Ioctl to query kernel params (DRM_I915_GETPARAM):
 */
#define I915_PARAM_IRQ_ACTIVE            1
#define I915_PARAM_ALLOW_BATCHBUFFER     2
#define I915_PARAM_LAST_DISPATCH         3
#define I915_PARAM_CHIPSET_ID            4
#define I915_PARAM_HAS_GEM               5
#define I915_PARAM_NUM_FENCES_AVAIL      6
#define I915_PARAM_HAS_OVERLAY           7
#define I915_PARAM_HAS_PAGEFLIPPING      8
#define I915_PARAM_HAS_EXECBUF2          9
#define I915_PARAM_HAS_BSD               10
#define I915_PARAM_HAS_BLT               11
#define I915_PARAM_HAS_RELAXED_FENCING   12
#define I915_PARAM_HAS_COHERENT_RINGS    13
#define I915_PARAM_HAS_EXEC_CONSTANTS    14
#define I915_PARAM_HAS_RELAXED_DELTA     15
#define I915_PARAM_HAS_GEN7_SOL_RESET    16
#define I915_PARAM_HAS_LLC               17
#define I915_PARAM_HAS_ALIASING_PPGTT    18
#define I915_PARAM_HAS_WAIT_TIMEOUT      19
#define I915_PARAM_HAS_SEMAPHORES        20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH  21
#define I915_PARAM_HAS_VEBOX             22
#define I915_PARAM_HAS_SECURE_BATCHES    23
#define I915_PARAM_HAS_PINNED_BATCHES    24
#define I915_PARAM_HAS_EXEC_NO_RELOC     25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
#define I915_PARAM_HAS_WT                27
#define I915_PARAM_CMD_PARSER_VERSION    28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION          30
#define I915_PARAM_HAS_BSD2              31
#define I915_PARAM_REVISION              32
#define I915_PARAM_SUBSLICE_TOTAL        33
#define I915_PARAM_EU_TOTAL              34
#define I915_PARAM_HAS_GPU_RESET         35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN      37
#define I915_PARAM_HAS_POOLED_EU         38
#define I915_PARAM_MIN_EU_IN_POOL        39
#define I915_PARAM_MMAP_GTT_VERSION      40

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
 * priorities and the driver will attempt to execute batches in priority order.
 */
#define I915_PARAM_HAS_SCHEDULER         41
#define I915_PARAM_HUC_STATUS            42

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
 * synchronisation with implicit fencing on individual objects.
 * See EXEC_OBJECT_ASYNC.
 */
#define I915_PARAM_HAS_EXEC_ASYNC        43

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
 * both being able to pass in a sync_file fd to wait upon before executing,
 * and being able to return a new sync_file fd that is signaled when the
 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
 */
#define I915_PARAM_HAS_EXEC_FENCE        44

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 * user specified buffers for post-mortem debugging of GPU hangs. See
 * EXEC_OBJECT_CAPTURE.
 */
#define I915_PARAM_HAS_EXEC_CAPTURE      45

#define I915_PARAM_SLICE_MASK            46

/* Assuming it's uniform for each slice, this queries the mask of subslices
 * per-slice for this system.
 */
#define I915_PARAM_SUBSLICE_MASK         47

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
 */
#define I915_PARAM_HAS_EXEC_BATCH_FIRST  48
 433
/* Argument for DRM_I915_GETPARAM: *value receives the queried parameter. */
typedef struct drm_i915_getparam {
	__s32 param;	/* one of the I915_PARAM_* values above */
	/*
	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
	 * compat32 code. Don't repeat this mistake.
	 */
	int __user *value;
} drm_i915_getparam_t;

/* Ioctl to set kernel params (DRM_I915_SETPARAM):
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
#define I915_SETPARAM_NUM_USED_FENCES                     4

typedef struct drm_i915_setparam {
	int param;	/* one of the I915_SETPARAM_* values above */
	int value;
} drm_i915_setparam_t;
 454
/* A memory manager for regions of shared memory (legacy, pre-GEM):
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;		/* which region to allocate from (I915_MEM_REGION_*) */
	int alignment;
	int size;
	int __user *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;	/* offset previously returned by the alloc ioctl */
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (eg on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;
 483
/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define DRM_I915_VBLANK_PIPE_A	1
#define DRM_I915_VBLANK_PIPE_B	2

typedef struct drm_i915_vblank_pipe {
	int pipe;	/* bitmask of DRM_I915_VBLANK_PIPE_* */
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

/* Argument for DRM_I915_HWS_ADDR: address of the hardware status page. */
typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;
 504
struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};

struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;	/* explicit padding, must be zero */
};
 533
struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};
 565
struct drm_i915_gem_mmap {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/** Offset in the object to map. */
	__u64 offset;
	/**
	 * Length of data to map.
	 *
	 * The value will be page-aligned.
	 */
	__u64 size;
	/**
	 * Returned pointer the data was mapped at.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 addr_ptr;

	/**
	 * Flags for extended behaviour.
	 *
	 * Added in version 2.
	 */
	__u64 flags;
#define I915_MMAP_WC 0x1
};

struct drm_i915_gem_mmap_gtt {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;
};
 605
struct drm_i915_gem_set_domain {
	/** Handle for the object */
	__u32 handle;

	/** New read domains (bitmask of I915_GEM_DOMAIN_* values) */
	__u32 read_domains;

	/** New write domain (at most one I915_GEM_DOMAIN_* value) */
	__u32 write_domain;
};

struct drm_i915_gem_sw_finish {
	/** Handle for the object */
	__u32 handle;
};
 621
struct drm_i915_gem_relocation_entry {
	/**
	 * Handle of the buffer being pointed to by this relocation entry.
	 *
	 * It's appealing to make this be an index into the mm_validate_entry
	 * list to refer to the buffer, but this allows the driver to create
	 * a relocation list for state buffers and not re-write it per
	 * exec using the buffer.
	 */
	__u32 target_handle;

	/**
	 * Value to be added to the offset of the target buffer to make up
	 * the relocation entry.
	 */
	__u32 delta;

	/** Offset in the buffer the relocation entry will be written into */
	__u64 offset;

	/**
	 * Offset value of the target buffer that the relocation entry was last
	 * written as.
	 *
	 * If the buffer has the same offset as last time, we can skip syncing
	 * and writing the relocation.  This value is written back out by
	 * the execbuffer ioctl when the relocation is written.
	 */
	__u64 presumed_offset;

	/**
	 * Target memory domains read by this operation.
	 */
	__u32 read_domains;

	/**
	 * Target memory domains written by this operation.
	 *
	 * Note that only one domain may be written by the whole
	 * execbuffer operation, so that where there are conflicts,
	 * the application will get -EINVAL back.
	 */
	__u32 write_domain;
};
 666
 667/** @{
 668 * Intel memory domains
 669 *
 670 * Most of these just align with the various caches in
 671 * the system and are used to flush and invalidate as
 672 * objects end up cached in different domains.
 673 */
 674/** CPU cache */
 675#define I915_GEM_DOMAIN_CPU             0x00000001
 676/** Render cache, used by 2D and 3D drawing */
 677#define I915_GEM_DOMAIN_RENDER          0x00000002
 678/** Sampler cache, used by texture engine */
 679#define I915_GEM_DOMAIN_SAMPLER         0x00000004
 680/** Command queue, used to load batch buffers */
 681#define I915_GEM_DOMAIN_COMMAND         0x00000008
 682/** Instruction cache, used by shader programs */
 683#define I915_GEM_DOMAIN_INSTRUCTION     0x00000010
 684/** Vertex address cache */
 685#define I915_GEM_DOMAIN_VERTEX          0x00000020
 686/** GTT domain - aperture and scanout */
 687#define I915_GEM_DOMAIN_GTT             0x00000040
 688/** WC domain - uncached access */
 689#define I915_GEM_DOMAIN_WC              0x00000080
 690/** @} */
 691
struct drm_i915_gem_exec_object {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;
};

struct drm_i915_gem_execbuffer {
	/**
	 * List of buffers to be validated with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
	 *
	 * These buffers must be listed in an order such that all relocations
	 * a buffer is performing refer to buffers that have already appeared
	 * in the validate list.
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
};
 741
/**
 * struct drm_i915_gem_exec_object2 - an element of the buffer list passed to
 * the DRM_I915_GEM_EXECBUFFER2 ioctl (see struct drm_i915_gem_execbuffer2).
 */
struct drm_i915_gem_exec_object2 {
        /**
         * User's handle for a buffer to be bound into the GTT for this
         * operation.
         */
        __u32 handle;

        /** Number of relocations to be performed on this buffer */
        __u32 relocation_count;
        /**
         * Pointer to array of struct drm_i915_gem_relocation_entry containing
         * the relocations to be performed in this buffer.
         */
        __u64 relocs_ptr;

        /** Required alignment in graphics aperture */
        __u64 alignment;

        /**
         * When the EXEC_OBJECT_PINNED flag is specified this is populated by
         * the user with the GTT offset at which this object will be pinned.
         * When the I915_EXEC_NO_RELOC flag is specified this must contain the
         * presumed_offset of the object.
         * During execbuffer2 the kernel populates it with the value of the
         * current GTT offset of the object, for future presumed_offset writes.
         */
        __u64 offset;

#define EXEC_OBJECT_NEEDS_FENCE          (1<<0)
#define EXEC_OBJECT_NEEDS_GTT            (1<<1)
#define EXEC_OBJECT_WRITE                (1<<2)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define EXEC_OBJECT_PINNED               (1<<4)
#define EXEC_OBJECT_PAD_TO_SIZE          (1<<5)
/* The kernel implicitly tracks GPU activity on all GEM objects, and
 * synchronises operations with outstanding rendering. This includes
 * rendering on other devices if exported via dma-buf. However, sometimes
 * this tracking is too coarse and the user knows better. For example,
 * if the object is split into non-overlapping ranges shared between different
 * clients or engines (i.e. suballocating objects), the implicit tracking
 * by kernel assumes that each operation affects the whole object rather
 * than an individual range, causing needless synchronisation between clients.
 * The kernel will also forgo any CPU cache flushes prior to rendering from
 * the object as the client is expected to be also handling such domain
 * tracking.
 *
 * The kernel maintains the implicit tracking in order to manage resources
 * used by the GPU - this flag only disables the synchronisation prior to
 * rendering with this object in this execbuf.
 *
 * Opting out of implicit synchronisation requires the user to do its own
 * explicit tracking to avoid rendering corruption. See, for example,
 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
 */
#define EXEC_OBJECT_ASYNC               (1<<6)
/* Request that the contents of this execobject be copied into the error
 * state upon a GPU hang involving this batch for post-mortem debugging.
 * These buffers are recorded in no particular order as "user" in
 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
 * if the kernel supports this flag.
 */
#define EXEC_OBJECT_CAPTURE             (1<<7)
/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
        __u64 flags;

        union {
                /** Must be zero unless a flag giving it meaning is set. */
                __u64 rsvd1;
                /** Used when EXEC_OBJECT_PAD_TO_SIZE is set in flags. */
                __u64 pad_to_size;
        };
        __u64 rsvd2;
};
 814
/**
 * struct drm_i915_gem_execbuffer2 - argument of the
 * DRM_I915_GEM_EXECBUFFER2 (and _WR) ioctl.
 */
struct drm_i915_gem_execbuffer2 {
        /**
         * List of gem_exec_object2 structs
         */
        __u64 buffers_ptr;
        __u32 buffer_count;

        /** Offset in the batchbuffer to start execution from. */
        __u32 batch_start_offset;
        /** Bytes used in batchbuffer from batch_start_offset */
        __u32 batch_len;
        __u32 DR1;
        __u32 DR4;
        __u32 num_cliprects;
        /** This is a struct drm_clip_rect *cliprects */
        __u64 cliprects_ptr;
/* Engine (ring) selection, held in the low bits of flags. */
#define I915_EXEC_RING_MASK              (7<<0)
#define I915_EXEC_DEFAULT                (0<<0)
#define I915_EXEC_RENDER                 (1<<0)
#define I915_EXEC_BSD                    (2<<0)
#define I915_EXEC_BLT                    (3<<0)
#define I915_EXEC_VEBOX                  (4<<0)

/* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK        (3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE    (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
        __u64 flags;
        __u64 rsvd1; /* now used for context info */
        __u64 rsvd2;
};
 852
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET        (1<<8)

/** Request a privileged ("secure") batch buffer. Note only available for
 * DRM_ROOT_ONLY | DRM_MASTER processes.
 */
#define I915_EXEC_SECURE                (1<<9)

/** Inform the kernel that the batch is and will always be pinned. This
 * negates the requirement for a workaround to be performed to avoid
 * an incoherent CS (such as can be found on 830/845). If this flag is
 * not passed, the kernel will endeavour to make sure the batch is
 * coherent with the CS before execution. If this flag is passed,
 * userspace assumes the responsibility for ensuring the same.
 */
#define I915_EXEC_IS_PINNED             (1<<10)

/** Provide a hint to the kernel that the command stream and auxiliary
 * state buffers already hold the correct presumed addresses and so the
 * relocation process may be skipped if no buffers need to be moved in
 * preparation for the execbuffer.
 */
#define I915_EXEC_NO_RELOC              (1<<11)

/** Use the reloc.handle as an index into the exec object array rather
 * than as the per-file handle.
 */
#define I915_EXEC_HANDLE_LUT            (1<<12)

/** Used for switching BSD rings on the platforms with two BSD rings */
#define I915_EXEC_BSD_SHIFT      (13)
#define I915_EXEC_BSD_MASK       (3 << I915_EXEC_BSD_SHIFT)
/* default ping-pong mode */
#define I915_EXEC_BSD_DEFAULT    (0 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING1      (1 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING2      (2 << I915_EXEC_BSD_SHIFT)

/** Tell the kernel that the batchbuffer is processed by
 *  the resource streamer.
 */
#define I915_EXEC_RESOURCE_STREAMER     (1<<15)

/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
 * the batch.
 *
 * Returns -EINVAL if the sync_file fd cannot be found.
 */
#define I915_EXEC_FENCE_IN              (1<<16)

/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
 * to the caller, and it should be closed after use. (The fd is a regular
 * file descriptor and will be cleaned up on process termination. It holds
 * a reference to the request, but nothing else.)
 *
 * The sync_file fd can be combined with other sync_file and passed either
 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
 * will only occur after this request completes), or to other devices.
 *
 * Using I915_EXEC_FENCE_OUT requires use of
 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
 * back to userspace. Failure to do so will cause the out-fence to always
 * be reported as zero, and the real fence fd to be leaked.
 */
#define I915_EXEC_FENCE_OUT             (1<<17)

/*
 * Traditionally the execbuf ioctl has only considered the final element in
 * the execobject[] to be the executable batch. Often though, the client
 * will know the batch object prior to construction and being able to place
 * it into the execobject[] array first can simplify the relocation tracking.
 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
 * execobject[] as the batch instead (the default is to use the last
 * element).
 */
#define I915_EXEC_BATCH_FIRST           (1<<18)
/* All bits above the highest defined flag are MBZ. */
#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_BATCH_FIRST<<1))

/* The context id lives in the low 32 bits of execbuffer2.rsvd1. */
#define I915_EXEC_CONTEXT_ID_MASK       (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
        (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
        ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
 937
/** struct drm_i915_gem_pin - argument of the (legacy) GEM pin ioctl. */
struct drm_i915_gem_pin {
        /** Handle of the buffer to be pinned. */
        __u32 handle;
        /** Padding for 64-bit alignment of the following fields; MBZ. */
        __u32 pad;

        /** alignment required within the aperture */
        __u64 alignment;

        /** Returned GTT offset of the buffer. */
        __u64 offset;
};
 949
/** struct drm_i915_gem_unpin - argument of the (legacy) GEM unpin ioctl. */
struct drm_i915_gem_unpin {
        /** Handle of the buffer to be unpinned. */
        __u32 handle;
        /** Padding; MBZ. */
        __u32 pad;
};
 955
/** struct drm_i915_gem_busy - argument of the DRM_I915_GEM_BUSY ioctl. */
struct drm_i915_gem_busy {
        /** Handle of the buffer to check for busy */
        __u32 handle;

        /** Return busy status
         *
         * A return of 0 implies that the object is idle (after
         * having flushed any pending activity), and a non-zero return that
         * the object is still in-flight on the GPU. (The GPU has not yet
         * signaled completion for all pending requests that reference the
         * object.) An object is guaranteed to become idle eventually (so
         * long as no new GPU commands are executed upon it). Due to the
         * asynchronous nature of the hardware, an object reported
         * as busy may become idle before the ioctl is completed.
         *
         * Furthermore, if the object is busy, which engine is busy is only
         * provided as a guide. There are race conditions which prevent the
         * report of which engines are busy from being always accurate.
         * However, the converse is not true. If the object is idle, the
         * result of the ioctl, that all engines are idle, is accurate.
         *
         * The returned dword is split into two fields to indicate both
         * the engines on which the object is being read, and the
         * engine on which it is currently being written (if any).
         *
         * The low word (bits 0:15) indicate if the object is being written
         * to by any engine (there can only be one, as the GEM implicit
         * synchronisation rules force writes to be serialised). Only the
         * engine for the last write is reported.
         *
         * The high word (bits 16:31) are a bitmask of which engines are
         * currently reading from the object. Multiple engines may be
         * reading from the object simultaneously.
         *
         * The value of each engine is the same as specified in the
         * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
         * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
         * the I915_EXEC_RENDER engine for execution, and so it is never
         * reported as active itself. Some hardware may have parallel
         * execution engines, e.g. multiple media engines, which are
         * mapped to the same identifier in the EXECBUFFER2 ioctl and
         * so are not separately reported for busyness.
         *
         * Caveat emptor:
         * Only the boolean result of this query is reliable; that is whether
         * the object is idle or busy. The report of which engines are busy
         * should be only used as a heuristic.
         */
        __u32 busy;
};
1006
/**
 * I915_CACHING_NONE
 *
 * GPU access is not coherent with cpu caches. Default for machines without an
 * LLC.
 */
#define I915_CACHING_NONE               0
/**
 * I915_CACHING_CACHED
 *
 * GPU access is coherent with cpu caches and furthermore the data is cached in
 * last-level caches shared between cpu cores and the gpu GT. Default on
 * machines with HAS_LLC.
 */
#define I915_CACHING_CACHED             1
/**
 * I915_CACHING_DISPLAY
 *
 * Special GPU caching mode which is coherent with the scanout engines.
 * Transparently falls back to I915_CACHING_NONE on platforms where no special
 * cache mode (like write-through or gfdt flushing) is available. The kernel
 * automatically sets this mode when using a buffer as a scanout target.
 * Userspace can manually set this mode to avoid a costly stall and clflush in
 * the hotpath of drawing the first frame.
 */
#define I915_CACHING_DISPLAY            2

/**
 * struct drm_i915_gem_caching - argument of the set/get caching ioctls.
 */
struct drm_i915_gem_caching {
        /**
         * Handle of the buffer to set/get the caching level of. */
        __u32 handle;

        /**
         * Caching level to apply or return value
         *
         * bits0-15 are for generic caching control (i.e. the above defined
         * values). bits16-31 are reserved for platform-specific variations
         * (e.g. l3$ caching on gen7). */
        __u32 caching;
};
1047
/* Object tiling modes, as used by the set/get tiling ioctls below. */
#define I915_TILING_NONE        0
#define I915_TILING_X           1
#define I915_TILING_Y           2
#define I915_TILING_LAST        I915_TILING_Y

/* Address bit 6 swizzling layouts reported via swizzle_mode. */
#define I915_BIT_6_SWIZZLE_NONE         0
#define I915_BIT_6_SWIZZLE_9            1
#define I915_BIT_6_SWIZZLE_9_10         2
#define I915_BIT_6_SWIZZLE_9_11         3
#define I915_BIT_6_SWIZZLE_9_10_11      4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN      5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17         6
#define I915_BIT_6_SWIZZLE_9_10_17      7
1063
/** struct drm_i915_gem_set_tiling - argument of the set tiling ioctl. */
struct drm_i915_gem_set_tiling {
        /** Handle of the buffer to have its tiling state updated */
        __u32 handle;

        /**
         * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
         * I915_TILING_Y).
         *
         * This value is to be set on request, and will be updated by the
         * kernel on successful return with the actual chosen tiling layout.
         *
         * The tiling mode may be demoted to I915_TILING_NONE when the system
         * has bit 6 swizzling that can't be managed correctly by GEM.
         *
         * Buffer contents become undefined when changing tiling_mode.
         */
        __u32 tiling_mode;

        /**
         * Stride in bytes for the object when in I915_TILING_X or
         * I915_TILING_Y.
         */
        __u32 stride;

        /**
         * Returned address bit 6 swizzling required for CPU access through
         * mmap mapping.
         */
        __u32 swizzle_mode;
};
1094
/** struct drm_i915_gem_get_tiling - argument of the get tiling ioctl. */
struct drm_i915_gem_get_tiling {
        /** Handle of the buffer to get tiling state for. */
        __u32 handle;

        /**
         * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
         * I915_TILING_Y).
         */
        __u32 tiling_mode;

        /**
         * Returned address bit 6 swizzling required for CPU access through
         * mmap mapping.
         */
        __u32 swizzle_mode;

        /**
         * Returned address bit 6 swizzling required for CPU access through
         * mmap mapping whilst bound.
         */
        __u32 phys_swizzle_mode;
};
1117
/** struct drm_i915_gem_get_aperture - argument of the get aperture ioctl. */
struct drm_i915_gem_get_aperture {
        /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
        __u64 aper_size;

        /**
         * Available space in the aperture used by i915_gem_execbuffer, in
         * bytes
         */
        __u64 aper_available_size;
};
1128
/**
 * struct drm_i915_get_pipe_from_crtc_id - map a KMS CRTC id to its
 * hardware pipe index.
 */
struct drm_i915_get_pipe_from_crtc_id {
        /** ID of CRTC being requested */
        __u32 crtc_id;

        /** pipe of requested CRTC */
        __u32 pipe;
};
1136
/* Backing-store advice values for drm_i915_gem_madvise.madv. */
#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */

/** struct drm_i915_gem_madvise - argument of the madvise ioctl. */
struct drm_i915_gem_madvise {
        /** Handle of the buffer to change the backing store advice */
        __u32 handle;

        /* Advice: either the buffer will be needed again in the near future,
         *         or won't be and could be discarded under memory pressure.
         */
        __u32 madv;

        /** Whether the backing store still exists. */
        __u32 retained;
};
1153
/* flags for drm_intel_overlay_put_image.flags */
#define I915_OVERLAY_TYPE_MASK          0xff
#define I915_OVERLAY_YUV_PLANAR         0x01
#define I915_OVERLAY_YUV_PACKED         0x02
#define I915_OVERLAY_RGB                0x03

#define I915_OVERLAY_DEPTH_MASK         0xff00
#define I915_OVERLAY_RGB24              0x1000
#define I915_OVERLAY_RGB16              0x2000
#define I915_OVERLAY_RGB15              0x3000
#define I915_OVERLAY_YUV422             0x0100
#define I915_OVERLAY_YUV411             0x0200
#define I915_OVERLAY_YUV420             0x0300
#define I915_OVERLAY_YUV410             0x0400

#define I915_OVERLAY_SWAP_MASK          0xff0000
#define I915_OVERLAY_NO_SWAP            0x000000
#define I915_OVERLAY_UV_SWAP            0x010000
#define I915_OVERLAY_Y_SWAP             0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP      0x030000

#define I915_OVERLAY_FLAGS_MASK         0xff000000
#define I915_OVERLAY_ENABLE             0x01000000

/**
 * struct drm_intel_overlay_put_image - argument of the overlay put_image
 * ioctl; describes a source buffer and the destination crtc/rectangle.
 */
struct drm_intel_overlay_put_image {
        /* various flags and src format description */
        __u32 flags;
        /* source picture description */
        __u32 bo_handle;
        /* stride values and offsets are in bytes, buffer relative */
        __u16 stride_Y; /* stride for packed formats */
        __u16 stride_UV;
        __u32 offset_Y; /* offset for packed formats */
        __u32 offset_U;
        __u32 offset_V;
        /* in pixels */
        __u16 src_width;
        __u16 src_height;
        /* to compensate the scaling factors for partially covered surfaces */
        __u16 src_scan_width;
        __u16 src_scan_height;
        /* output crtc description */
        __u32 crtc_id;
        __u16 dst_x;
        __u16 dst_y;
        __u16 dst_width;
        __u16 dst_height;
};
1202
/* flags for drm_intel_overlay_attrs.flags */
#define I915_OVERLAY_UPDATE_ATTRS       (1<<0)
#define I915_OVERLAY_UPDATE_GAMMA       (1<<1)
#define I915_OVERLAY_DISABLE_DEST_COLORKEY      (1<<2)
/**
 * struct drm_intel_overlay_attrs - argument of the overlay attrs ioctl;
 * colour key, picture adjustment and gamma ramp settings for the overlay.
 */
struct drm_intel_overlay_attrs {
        __u32 flags;
        __u32 color_key;
        __s32 brightness;
        __u32 contrast;
        __u32 saturation;
        __u32 gamma0;
        __u32 gamma1;
        __u32 gamma2;
        __u32 gamma3;
        __u32 gamma4;
        __u32 gamma5;
};
1220
/*
 * Intel sprite handling
 *
 * Color keying works with a min/mask/max tuple.  Both source and destination
 * color keying is allowed.
 *
 * Source keying:
 * Sprite pixels within the min & max values, masked against the color channels
 * specified in the mask field, will be transparent.  All other pixels will
 * be displayed on top of the primary plane.  For RGB surfaces, only the min
 * and mask fields will be used; ranged compares are not allowed.
 *
 * Destination keying:
 * Primary plane pixels that match the min value, masked against the color
 * channels specified in the mask field, will be replaced by corresponding
 * pixels from the sprite plane.
 *
 * Note that source & destination keying are exclusive; only one can be
 * active on a given plane.
 */

#define I915_SET_COLORKEY_NONE          (1<<0) /* disable color key matching */
#define I915_SET_COLORKEY_DESTINATION   (1<<1)
#define I915_SET_COLORKEY_SOURCE        (1<<2)
/** struct drm_intel_sprite_colorkey - argument of the set colorkey ioctl. */
struct drm_intel_sprite_colorkey {
        __u32 plane_id;
        __u32 min_value;
        __u32 channel_mask;
        __u32 max_value;
        __u32 flags;
};
1252
/** struct drm_i915_gem_wait - argument of the DRM_I915_GEM_WAIT ioctl. */
struct drm_i915_gem_wait {
        /** Handle of BO we shall wait on */
        __u32 bo_handle;
        __u32 flags;
        /** Number of nanoseconds to wait, Returns time remaining. */
        __s64 timeout_ns;
};
1260
/** struct drm_i915_gem_context_create - argument of the context create ioctl. */
struct drm_i915_gem_context_create {
        /* output: id of new context */
        __u32 ctx_id;
        /* padding; MBZ */
        __u32 pad;
};
1266
/** struct drm_i915_gem_context_destroy - argument of the context destroy ioctl. */
struct drm_i915_gem_context_destroy {
        /* id of the context to destroy */
        __u32 ctx_id;
        /* padding; MBZ */
        __u32 pad;
};
1271
/** struct drm_i915_reg_read - argument of the DRM_I915_REG_READ ioctl. */
struct drm_i915_reg_read {
        /*
         * Register offset.
         * For 64bit wide registers where the upper 32bits don't immediately
         * follow the lower 32bits, the offset of the lower 32bits must
         * be specified
         */
        __u64 offset;
        __u64 val; /* Return value */
};
/* Known registers:
 *
 * Render engine timestamp - 0x2358 + 64bit - gen7+
 * - Note this register returns an invalid value if using the default
 *   single instruction 8byte read, in order to workaround that use
 *   offset (0x2358 | 1) instead.
 *
 */
1290
/** struct drm_i915_reset_stats - argument of the get reset stats ioctl. */
struct drm_i915_reset_stats {
        __u32 ctx_id;
        __u32 flags;

        /* All resets since boot/module reload, for all contexts */
        __u32 reset_count;

        /* Number of batches lost when active in GPU, for this context */
        __u32 batch_active;

        /* Number of batches lost pending for execution, for this context */
        __u32 batch_pending;

        /* padding; MBZ */
        __u32 pad;
};
1306
/**
 * struct drm_i915_gem_userptr - argument of the userptr ioctl; wraps a range
 * of user memory (user_ptr/user_size) in a new GEM object.
 */
struct drm_i915_gem_userptr {
        __u64 user_ptr;
        __u64 user_size;
        __u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
        /**
         * Returned handle for the object.
         *
         * Object handles are nonzero.
         */
        __u32 handle;
};
1320
/**
 * struct drm_i915_gem_context_param - argument of the context get/set param
 * ioctls; selects one of the I915_CONTEXT_PARAM_* values for context ctx_id.
 */
struct drm_i915_gem_context_param {
        __u32 ctx_id;
        __u32 size;
        __u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD   0x1
#define I915_CONTEXT_PARAM_NO_ZEROMAP   0x2
#define I915_CONTEXT_PARAM_GTT_SIZE     0x3
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE     0x4
#define I915_CONTEXT_PARAM_BANNABLE     0x5
        __u64 value;
};
1332
/* OA unit report formats, selected via DRM_I915_PERF_PROP_OA_FORMAT. */
enum drm_i915_oa_format {
        I915_OA_FORMAT_A13 = 1,     /* HSW only */
        I915_OA_FORMAT_A29,         /* HSW only */
        I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
        I915_OA_FORMAT_B4_C8,       /* HSW only */
        I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
        I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
        I915_OA_FORMAT_C4_B8,       /* HSW+ */

        /* Gen8+ */
        I915_OA_FORMAT_A12,
        I915_OA_FORMAT_A12_B8_C8,
        I915_OA_FORMAT_A32u40_A4u32_B8_C8,

        I915_OA_FORMAT_MAX          /* non-ABI */
};
1349
/* Property ids for the (id, value) pairs passed via properties_ptr when
 * opening an i915 perf stream (see struct drm_i915_perf_open_param).
 */
enum drm_i915_perf_property_id {
        /**
         * Open the stream for a specific context handle (as used with
         * execbuffer2). A stream opened for a specific context this way
         * won't typically require root privileges.
         */
        DRM_I915_PERF_PROP_CTX_HANDLE = 1,

        /**
         * A value of 1 requests the inclusion of raw OA unit reports as
         * part of stream samples.
         */
        DRM_I915_PERF_PROP_SAMPLE_OA,

        /**
         * The value specifies which set of OA unit metrics should be
         * configured, defining the contents of any OA unit reports.
         */
        DRM_I915_PERF_PROP_OA_METRICS_SET,

        /**
         * The value specifies the size and layout of OA unit reports.
         */
        DRM_I915_PERF_PROP_OA_FORMAT,

        /**
         * Specifying this property implicitly requests periodic OA unit
         * sampling and (at least on Haswell) the sampling frequency is derived
         * from this exponent as follows:
         *
         *   80ns * 2^(period_exponent + 1)
         */
        DRM_I915_PERF_PROP_OA_EXPONENT,

        DRM_I915_PERF_PROP_MAX /* non-ABI */
};
1386
/** struct drm_i915_perf_open_param - argument of the perf open ioctl. */
struct drm_i915_perf_open_param {
        __u32 flags;
#define I915_PERF_FLAG_FD_CLOEXEC       (1<<0)
#define I915_PERF_FLAG_FD_NONBLOCK      (1<<1)
#define I915_PERF_FLAG_DISABLED         (1<<2)

        /** The number of u64 (id, value) pairs */
        __u32 num_properties;

        /**
         * Pointer to array of u64 (id, value) pairs configuring the stream
         * to open.
         */
        __u64 properties_ptr;
};
1402
/**
 * Enable data capture for a stream that was either opened in a disabled state
 * via I915_PERF_FLAG_DISABLED or was later disabled via
 * I915_PERF_IOCTL_DISABLE.
 *
 * It is intended to be cheaper to disable and enable a stream than it may be
 * to close and re-open a stream with the same configuration.
 *
 * It's undefined whether any pending data for the stream will be lost.
 *
 * NOTE(review): these _IO('i', ...) ioctls appear to be issued on the perf
 * stream fd rather than the drm device node - confirm against the driver.
 */
#define I915_PERF_IOCTL_ENABLE  _IO('i', 0x0)

/**
 * Disable data capture for a stream.
 *
 * It is an error to try and read a stream that is disabled.
 */
#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
1421
/**
 * Common to all i915 perf records
 */
struct drm_i915_perf_record_header {
        /* One of enum drm_i915_perf_record_type */
        __u32 type;
        __u16 pad;
        /* Size of the whole record, header included */
        __u16 size;
};
1430
enum drm_i915_perf_record_type {

        /**
         * Samples are the work horse record type whose contents are extensible
         * and defined when opening an i915 perf stream based on the given
         * properties.
         *
         * Boolean properties following the naming convention
         * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
         * every sample.
         *
         * The order of these sample properties given by userspace has no
         * effect on the ordering of data within a sample. The order is
         * documented here.
         *
         * struct {
         *     struct drm_i915_perf_record_header header;
         *
         *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
         * };
         */
        DRM_I915_PERF_RECORD_SAMPLE = 1,

        /*
         * Indicates that one or more OA reports were not written by the
         * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
         * command collides with periodic sampling - which would be more likely
         * at higher sampling frequencies.
         */
        DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,

        /**
         * An error occurred that resulted in all pending OA reports being lost.
         */
        DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,

        DRM_I915_PERF_RECORD_MAX /* non-ABI */
};
1469
1470#if defined(__cplusplus)
1471}
1472#endif
1473
1474#endif /* _UAPI_I915_DRM_H_ */
1475