1/* 2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 3 * All Rights Reserved. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the 7 * "Software"), to deal in the Software without restriction, including 8 * without limitation the rights to use, copy, modify, merge, publish, 9 * distribute, sub license, and/or sell copies of the Software, and to 10 * permit persons to whom the Software is furnished to do so, subject to 11 * the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the 14 * next paragraph) shall be included in all copies or substantial portions 15 * of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 24 * 25 */ 26 27#ifndef _UAPI_I915_DRM_H_ 28#define _UAPI_I915_DRM_H_ 29 30#include "drm.h" 31 32#if defined(__cplusplus) 33extern "C" { 34#endif 35 36/* Please note that modifications to all structs defined here are 37 * subject to backwards-compatibility constraints. 38 */ 39 40/** 41 * DOC: uevents generated by i915 on it's device node 42 * 43 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch 44 * event from the gpu l3 cache. Additional information supplied is ROW, 45 * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep 46 * track of these events and if a specific cache-line seems to have a 47 * persistent error remap it with the l3 remapping tool supplied in 48 * intel-gpu-tools. The value supplied with the event is always 1. 49 * 50 * I915_ERROR_UEVENT - Generated upon error detection, currently only via 51 * hangcheck. The error detection event is a good indicator of when things 52 * began to go badly. The value supplied with the event is a 1 upon error 53 * detection, and a 0 upon reset completion, signifying no more error 54 * exists. NOTE: Disabling hangcheck or reset via module parameter will 55 * cause the related events to not be seen. 56 * 57 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the 58 * the GPU. The value supplied with the event is always 1. NOTE: Disable 59 * reset via module parameter will cause this event to not be seen. 60 */ 61#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR" 62#define I915_ERROR_UEVENT "ERROR" 63#define I915_RESET_UEVENT "RESET" 64 65/* 66 * i915_user_extension: Base class for defining a chain of extensions 67 * 68 * Many interfaces need to grow over time. In most cases we can simply 69 * extend the struct and have userspace pass in more data. Another option, 70 * as demonstrated by Vulkan's approach to providing extensions for forward 71 * and backward compatibility, is to use a list of optional structs to 72 * provide those extra details. 73 * 74 * The key advantage to using an extension chain is that it allows us to 75 * redefine the interface more easily than an ever growing struct of 76 * increasing complexity, and for large parts of that interface to be 77 * entirely optional. The downside is more pointer chasing; chasing across 78 * the __user boundary with pointers encapsulated inside u64. 79 */ 80struct i915_user_extension { 81 __u64 next_extension; 82 __u32 name; 83 __u32 flags; /* All undefined bits must be zero. */ 84 __u32 rsvd[4]; /* Reserved for future use; must be zero. */ 85}; 86 87/* 88 * MOCS indexes used for GPU surfaces, defining the cacheability of the 89 * surface data and the coherency for this data wrt. CPU vs. GPU accesses. 90 */ 91enum i915_mocs_table_index { 92 /* 93 * Not cached anywhere, coherency between CPU and GPU accesses is 94 * guaranteed. 95 */ 96 I915_MOCS_UNCACHED, 97 /* 98 * Cacheability and coherency controlled by the kernel automatically 99 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current 100 * usage of the surface (used for display scanout or not). 101 */ 102 I915_MOCS_PTE, 103 /* 104 * Cached in all GPU caches available on the platform. 105 * Coherency between CPU and GPU accesses to the surface is not 106 * guaranteed without extra synchronization. 107 */ 108 I915_MOCS_CACHED, 109}; 110 111/* 112 * Different engines serve different roles, and there may be more than one 113 * engine serving each role. enum drm_i915_gem_engine_class provides a 114 * classification of the role of the engine, which may be used when requesting 115 * operations to be performed on a certain subset of engines, or for providing 116 * information about that group. 117 */ 118enum drm_i915_gem_engine_class { 119 I915_ENGINE_CLASS_RENDER = 0, 120 I915_ENGINE_CLASS_COPY = 1, 121 I915_ENGINE_CLASS_VIDEO = 2, 122 I915_ENGINE_CLASS_VIDEO_ENHANCE = 3, 123 124 /* should be kept compact */ 125 126 I915_ENGINE_CLASS_INVALID = -1 127}; 128 129/* 130 * There may be more than one engine fulfilling any role within the system. 131 * Each engine of a class is given a unique instance number and therefore 132 * any engine can be specified by its class:instance tuplet. APIs that allow 133 * access to any engine in the system will use struct i915_engine_class_instance 134 * for this identification. 135 */ 136struct i915_engine_class_instance { 137 __u16 engine_class; /* see enum drm_i915_gem_engine_class */ 138 __u16 engine_instance; 139#define I915_ENGINE_CLASS_INVALID_NONE -1 140#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2 141}; 142 143/** 144 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915 145 * 146 */ 147 148enum drm_i915_pmu_engine_sample { 149 I915_SAMPLE_BUSY = 0, 150 I915_SAMPLE_WAIT = 1, 151 I915_SAMPLE_SEMA = 2 152}; 153 154#define I915_PMU_SAMPLE_BITS (4) 155#define I915_PMU_SAMPLE_MASK (0xf) 156#define I915_PMU_SAMPLE_INSTANCE_BITS (8) 157#define I915_PMU_CLASS_SHIFT \ 158 (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS) 159 160#define __I915_PMU_ENGINE(class, instance, sample) \ 161 ((class) << I915_PMU_CLASS_SHIFT | \ 162 (instance) << I915_PMU_SAMPLE_BITS | \ 163 (sample)) 164 165#define I915_PMU_ENGINE_BUSY(class, instance) \ 166 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY) 167 168#define I915_PMU_ENGINE_WAIT(class, instance) \ 169 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT) 170 171#define I915_PMU_ENGINE_SEMA(class, instance) \ 172 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA) 173 174#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) 175 176#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0) 177#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1) 178#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2) 179#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3) 180 181#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY 182 183/* Each region is a minimum of 16k, and there are at most 255 of them. 184 */ 185#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use 186 * of chars for next/prev indices */ 187#define I915_LOG_MIN_TEX_REGION_SIZE 14 188 189typedef struct _drm_i915_init { 190 enum { 191 I915_INIT_DMA = 0x01, 192 I915_CLEANUP_DMA = 0x02, 193 I915_RESUME_DMA = 0x03 194 } func; 195 unsigned int mmio_offset; 196 int sarea_priv_offset; 197 unsigned int ring_start; 198 unsigned int ring_end; 199 unsigned int ring_size; 200 unsigned int front_offset; 201 unsigned int back_offset; 202 unsigned int depth_offset; 203 unsigned int w; 204 unsigned int h; 205 unsigned int pitch; 206 unsigned int pitch_bits; 207 unsigned int back_pitch; 208 unsigned int depth_pitch; 209 unsigned int cpp; 210 unsigned int chipset; 211} drm_i915_init_t; 212 213typedef struct _drm_i915_sarea { 214 struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1]; 215 int last_upload; /* last time texture was uploaded */ 216 int last_enqueue; /* last time a buffer was enqueued */ 217 int last_dispatch; /* age of the most recently dispatched buffer */ 218 int ctxOwner; /* last context to upload state */ 219 int texAge; 220 int pf_enabled; /* is pageflipping allowed? */ 221 int pf_active; 222 int pf_current_page; /* which buffer is being displayed? */ 223 int perf_boxes; /* performance boxes to be displayed */ 224 int width, height; /* screen size in pixels */ 225 226 drm_handle_t front_handle; 227 int front_offset; 228 int front_size; 229 230 drm_handle_t back_handle; 231 int back_offset; 232 int back_size; 233 234 drm_handle_t depth_handle; 235 int depth_offset; 236 int depth_size; 237 238 drm_handle_t tex_handle; 239 int tex_offset; 240 int tex_size; 241 int log_tex_granularity; 242 int pitch; 243 int rotation; /* 0, 90, 180 or 270 */ 244 int rotated_offset; 245 int rotated_size; 246 int rotated_pitch; 247 int virtualX, virtualY; 248 249 unsigned int front_tiled; 250 unsigned int back_tiled; 251 unsigned int depth_tiled; 252 unsigned int rotated_tiled; 253 unsigned int rotated2_tiled; 254 255 int pipeA_x; 256 int pipeA_y; 257 int pipeA_w; 258 int pipeA_h; 259 int pipeB_x; 260 int pipeB_y; 261 int pipeB_w; 262 int pipeB_h; 263 264 /* fill out some space for old userspace triple buffer */ 265 drm_handle_t unused_handle; 266 __u32 unused1, unused2, unused3; 267 268 /* buffer object handles for static buffers. May change 269 * over the lifetime of the client. 270 */ 271 __u32 front_bo_handle; 272 __u32 back_bo_handle; 273 __u32 unused_bo_handle; 274 __u32 depth_bo_handle; 275 276} drm_i915_sarea_t; 277 278/* due to userspace building against these headers we need some compat here */ 279#define planeA_x pipeA_x 280#define planeA_y pipeA_y 281#define planeA_w pipeA_w 282#define planeA_h pipeA_h 283#define planeB_x pipeB_x 284#define planeB_y pipeB_y 285#define planeB_w pipeB_w 286#define planeB_h pipeB_h 287 288/* Flags for perf_boxes 289 */ 290#define I915_BOX_RING_EMPTY 0x1 291#define I915_BOX_FLIP 0x2 292#define I915_BOX_WAIT 0x4 293#define I915_BOX_TEXTURE_LOAD 0x8 294#define I915_BOX_LOST_CONTEXT 0x10 295 296/* 297 * i915 specific ioctls. 298 * 299 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie 300 * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset 301 * against DRM_COMMAND_BASE and should be between [0x0, 0x60). 302 */ 303#define DRM_I915_INIT 0x00 304#define DRM_I915_FLUSH 0x01 305#define DRM_I915_FLIP 0x02 306#define DRM_I915_BATCHBUFFER 0x03 307#define DRM_I915_IRQ_EMIT 0x04 308#define DRM_I915_IRQ_WAIT 0x05 309#define DRM_I915_GETPARAM 0x06 310#define DRM_I915_SETPARAM 0x07 311#define DRM_I915_ALLOC 0x08 312#define DRM_I915_FREE 0x09 313#define DRM_I915_INIT_HEAP 0x0a 314#define DRM_I915_CMDBUFFER 0x0b 315#define DRM_I915_DESTROY_HEAP 0x0c 316#define DRM_I915_SET_VBLANK_PIPE 0x0d 317#define DRM_I915_GET_VBLANK_PIPE 0x0e 318#define DRM_I915_VBLANK_SWAP 0x0f 319#define DRM_I915_HWS_ADDR 0x11 320#define DRM_I915_GEM_INIT 0x13 321#define DRM_I915_GEM_EXECBUFFER 0x14 322#define DRM_I915_GEM_PIN 0x15 323#define DRM_I915_GEM_UNPIN 0x16 324#define DRM_I915_GEM_BUSY 0x17 325#define DRM_I915_GEM_THROTTLE 0x18 326#define DRM_I915_GEM_ENTERVT 0x19 327#define DRM_I915_GEM_LEAVEVT 0x1a 328#define DRM_I915_GEM_CREATE 0x1b 329#define DRM_I915_GEM_PREAD 0x1c 330#define DRM_I915_GEM_PWRITE 0x1d 331#define DRM_I915_GEM_MMAP 0x1e 332#define DRM_I915_GEM_SET_DOMAIN 0x1f 333#define DRM_I915_GEM_SW_FINISH 0x20 334#define DRM_I915_GEM_SET_TILING 0x21 335#define DRM_I915_GEM_GET_TILING 0x22 336#define DRM_I915_GEM_GET_APERTURE 0x23 337#define DRM_I915_GEM_MMAP_GTT 0x24 338#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 339#define DRM_I915_GEM_MADVISE 0x26 340#define DRM_I915_OVERLAY_PUT_IMAGE 0x27 341#define DRM_I915_OVERLAY_ATTRS 0x28 342#define DRM_I915_GEM_EXECBUFFER2 0x29 343#define DRM_I915_GEM_EXECBUFFER2_WR DRM_I915_GEM_EXECBUFFER2 344#define DRM_I915_GET_SPRITE_COLORKEY 0x2a 345#define DRM_I915_SET_SPRITE_COLORKEY 0x2b 346#define DRM_I915_GEM_WAIT 0x2c 347#define DRM_I915_GEM_CONTEXT_CREATE 0x2d 348#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e 349#define DRM_I915_GEM_SET_CACHING 0x2f 350#define DRM_I915_GEM_GET_CACHING 0x30 351#define DRM_I915_REG_READ 0x31 352#define DRM_I915_GET_RESET_STATS 0x32 353#define DRM_I915_GEM_USERPTR 0x33 354#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 355#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 356#define DRM_I915_PERF_OPEN 0x36 357#define DRM_I915_PERF_ADD_CONFIG 0x37 358#define DRM_I915_PERF_REMOVE_CONFIG 0x38 359#define DRM_I915_QUERY 0x39 360#define DRM_I915_GEM_VM_CREATE 0x3a 361#define DRM_I915_GEM_VM_DESTROY 0x3b 362/* Must be kept compact -- no holes */ 363 364#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 365#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 366#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP) 367#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t) 368#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t) 369#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t) 370#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t) 371#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t) 372#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t) 373#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t) 374#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t) 375#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t) 376#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) 377#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 378#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 379#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 380#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init) 381#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) 382#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) 383#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) 384#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2) 385#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 386#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 387#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) 388#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching) 389#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching) 390#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) 391#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) 392#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) 393#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) 394#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) 395#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) 396#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) 397#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt) 398#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) 399#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) 400#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) 401#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) 402#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) 403#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id) 404#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) 405#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) 406#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) 407#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 408#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 409#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) 410#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) 411#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext) 412#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) 413#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) 414#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats) 415#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) 416#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) 417#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) 418#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) 419#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config) 420#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64) 421#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query) 422#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control) 423#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control) 424 425/* Allow drivers to submit batchbuffers directly to hardware, relying 426 * on the security mechanisms provided by hardware. 427 */ 428typedef struct drm_i915_batchbuffer { 429 int start; /* agp offset */ 430 int used; /* nr bytes in use */ 431 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 432 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 433 int num_cliprects; /* mulitpass with multiple cliprects? */ 434 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ 435} drm_i915_batchbuffer_t; 436 437/* As above, but pass a pointer to userspace buffer which can be 438 * validated by the kernel prior to sending to hardware. 439 */ 440typedef struct _drm_i915_cmdbuffer { 441 char __user *buf; /* pointer to userspace command buffer */ 442 int sz; /* nr bytes in buf */ 443 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ 444 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ 445 int num_cliprects; /* mulitpass with multiple cliprects? */ 446 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */ 447} drm_i915_cmdbuffer_t; 448 449/* Userspace can request & wait on irq's: 450 */ 451typedef struct drm_i915_irq_emit { 452 int __user *irq_seq; 453} drm_i915_irq_emit_t; 454 455typedef struct drm_i915_irq_wait { 456 int irq_seq; 457} drm_i915_irq_wait_t; 458 459/* 460 * Different modes of per-process Graphics Translation Table, 461 * see I915_PARAM_HAS_ALIASING_PPGTT 462 */ 463#define I915_GEM_PPGTT_NONE 0 464#define I915_GEM_PPGTT_ALIASING 1 465#define I915_GEM_PPGTT_FULL 2 466 467/* Ioctl to query kernel params: 468 */ 469#define I915_PARAM_IRQ_ACTIVE 1 470#define I915_PARAM_ALLOW_BATCHBUFFER 2 471#define I915_PARAM_LAST_DISPATCH 3 472#define I915_PARAM_CHIPSET_ID 4 473#define I915_PARAM_HAS_GEM 5 474#define I915_PARAM_NUM_FENCES_AVAIL 6 475#define I915_PARAM_HAS_OVERLAY 7 476#define I915_PARAM_HAS_PAGEFLIPPING 8 477#define I915_PARAM_HAS_EXECBUF2 9 478#define I915_PARAM_HAS_BSD 10 479#define I915_PARAM_HAS_BLT 11 480#define I915_PARAM_HAS_RELAXED_FENCING 12 481#define I915_PARAM_HAS_COHERENT_RINGS 13 482#define I915_PARAM_HAS_EXEC_CONSTANTS 14 483#define I915_PARAM_HAS_RELAXED_DELTA 15 484#define I915_PARAM_HAS_GEN7_SOL_RESET 16 485#define I915_PARAM_HAS_LLC 17 486#define I915_PARAM_HAS_ALIASING_PPGTT 18 487#define I915_PARAM_HAS_WAIT_TIMEOUT 19 488#define I915_PARAM_HAS_SEMAPHORES 20 489#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 490#define I915_PARAM_HAS_VEBOX 22 491#define I915_PARAM_HAS_SECURE_BATCHES 23 492#define I915_PARAM_HAS_PINNED_BATCHES 24 493#define I915_PARAM_HAS_EXEC_NO_RELOC 25 494#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 495#define I915_PARAM_HAS_WT 27 496#define I915_PARAM_CMD_PARSER_VERSION 28 497#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29 498#define I915_PARAM_MMAP_VERSION 30 499#define I915_PARAM_HAS_BSD2 31 500#define I915_PARAM_REVISION 32 501#define I915_PARAM_SUBSLICE_TOTAL 33 502#define I915_PARAM_EU_TOTAL 34 503#define I915_PARAM_HAS_GPU_RESET 35 504#define I915_PARAM_HAS_RESOURCE_STREAMER 36 505#define I915_PARAM_HAS_EXEC_SOFTPIN 37 506#define I915_PARAM_HAS_POOLED_EU 38 507#define I915_PARAM_MIN_EU_IN_POOL 39 508#define I915_PARAM_MMAP_GTT_VERSION 40 509 510/* 511 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution 512 * priorities and the driver will attempt to execute batches in priority order. 513 * The param returns a capability bitmask, nonzero implies that the scheduler 514 * is enabled, with different features present according to the mask. 515 * 516 * The initial priority for each batch is supplied by the context and is 517 * controlled via I915_CONTEXT_PARAM_PRIORITY. 518 */ 519#define I915_PARAM_HAS_SCHEDULER 41 520#define I915_SCHEDULER_CAP_ENABLED (1ul << 0) 521#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1) 522#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2) 523#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3) 524#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4) 525 526#define I915_PARAM_HUC_STATUS 42 527 528/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of 529 * synchronisation with implicit fencing on individual objects. 530 * See EXEC_OBJECT_ASYNC. 531 */ 532#define I915_PARAM_HAS_EXEC_ASYNC 43 533 534/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support - 535 * both being able to pass in a sync_file fd to wait upon before executing, 536 * and being able to return a new sync_file fd that is signaled when the 537 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT. 538 */ 539#define I915_PARAM_HAS_EXEC_FENCE 44 540 541/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture 542 * user specified bufffers for post-mortem debugging of GPU hangs. See 543 * EXEC_OBJECT_CAPTURE. 544 */ 545#define I915_PARAM_HAS_EXEC_CAPTURE 45 546 547#define I915_PARAM_SLICE_MASK 46 548 549/* Assuming it's uniform for each slice, this queries the mask of subslices 550 * per-slice for this system. 551 */ 552#define I915_PARAM_SUBSLICE_MASK 47 553 554/* 555 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer 556 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST. 557 */ 558#define I915_PARAM_HAS_EXEC_BATCH_FIRST 48 559 560/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of 561 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY. 562 */ 563#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49 564 565/* 566 * Query whether every context (both per-file default and user created) is 567 * isolated (insofar as HW supports). If this parameter is not true, then 568 * freshly created contexts may inherit values from an existing context, 569 * rather than default HW values. If true, it also ensures (insofar as HW 570 * supports) that all state set by this context will not leak to any other 571 * context. 572 * 573 * As not every engine across every gen support contexts, the returned 574 * value reports the support of context isolation for individual engines by 575 * returning a bitmask of each engine class set to true if that class supports 576 * isolation. 577 */ 578#define I915_PARAM_HAS_CONTEXT_ISOLATION 50 579 580/* Frequency of the command streamer timestamps given by the *_TIMESTAMP 581 * registers. This used to be fixed per platform but from CNL onwards, this 582 * might vary depending on the parts. 583 */ 584#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51 585 586/* 587 * Once upon a time we supposed that writes through the GGTT would be 588 * immediately in physical memory (once flushed out of the CPU path). However, 589 * on a few different processors and chipsets, this is not necessarily the case 590 * as the writes appear to be buffered internally. Thus a read of the backing 591 * storage (physical memory) via a different path (with different physical tags 592 * to the indirect write via the GGTT) will see stale values from before 593 * the GGTT write. Inside the kernel, we can for the most part keep track of 594 * the different read/write domains in use (e.g. set-domain), but the assumption 595 * of coherency is baked into the ABI, hence reporting its true state in this 596 * parameter. 597 * 598 * Reports true when writes via mmap_gtt are immediately visible following an 599 * lfence to flush the WCB. 600 * 601 * Reports false when writes via mmap_gtt are indeterminately delayed in an in 602 * internal buffer and are _not_ immediately visible to third parties accessing 603 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC 604 * communications channel when reporting false is strongly disadvised. 605 */ 606#define I915_PARAM_MMAP_GTT_COHERENT 52 607 608/* 609 * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel 610 * execution through use of explicit fence support. 611 * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT. 612 */ 613#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53 614 615/* 616 * Revision of the i915-perf uAPI. The value returned helps determine what 617 * i915-perf features are available. See drm_i915_perf_property_id. 618 */ 619#define I915_PARAM_PERF_REVISION 54 620 621/* Must be kept compact -- no holes and well documented */ 622 623typedef struct drm_i915_getparam { 624 __s32 param; 625 /* 626 * WARNING: Using pointers instead of fixed-size u64 means we need to write 627 * compat32 code. Don't repeat this mistake. 628 */ 629 int __user *value; 630} drm_i915_getparam_t; 631 632/* Ioctl to set kernel params: 633 */ 634#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1 635#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 636#define I915_SETPARAM_ALLOW_BATCHBUFFER 3 637#define I915_SETPARAM_NUM_USED_FENCES 4 638/* Must be kept compact -- no holes */ 639 640typedef struct drm_i915_setparam { 641 int param; 642 int value; 643} drm_i915_setparam_t; 644 645/* A memory manager for regions of shared memory: 646 */ 647#define I915_MEM_REGION_AGP 1 648 649typedef struct drm_i915_mem_alloc { 650 int region; 651 int alignment; 652 int size; 653 int __user *region_offset; /* offset from start of fb or agp */ 654} drm_i915_mem_alloc_t; 655 656typedef struct drm_i915_mem_free { 657 int region; 658 int region_offset; 659} drm_i915_mem_free_t; 660 661typedef struct drm_i915_mem_init_heap { 662 int region; 663 int size; 664 int start; 665} drm_i915_mem_init_heap_t; 666 667/* Allow memory manager to be torn down and re-initialized (eg on 668 * rotate): 669 */ 670typedef struct drm_i915_mem_destroy_heap { 671 int region; 672} drm_i915_mem_destroy_heap_t; 673 674/* Allow X server to configure which pipes to monitor for vblank signals 675 */ 676#define DRM_I915_VBLANK_PIPE_A 1 677#define DRM_I915_VBLANK_PIPE_B 2 678 679typedef struct drm_i915_vblank_pipe { 680 int pipe; 681} drm_i915_vblank_pipe_t; 682 683/* Schedule buffer swap at given vertical blank: 684 */ 685typedef struct drm_i915_vblank_swap { 686 drm_drawable_t drawable; 687 enum drm_vblank_seq_type seqtype; 688 unsigned int sequence; 689} drm_i915_vblank_swap_t; 690 691typedef struct drm_i915_hws_addr { 692 __u64 addr; 693} drm_i915_hws_addr_t; 694 695struct drm_i915_gem_init { 696 /** 697 * Beginning offset in the GTT to be managed by the DRM memory 698 * manager. 699 */ 700 __u64 gtt_start; 701 /** 702 * Ending offset in the GTT to be managed by the DRM memory 703 * manager. 704 */ 705 __u64 gtt_end; 706}; 707 708struct drm_i915_gem_create { 709 /** 710 * Requested size for the object. 711 * 712 * The (page-aligned) allocated size for the object will be returned. 713 */ 714 __u64 size; 715 /** 716 * Returned handle for the object. 717 * 718 * Object handles are nonzero. 719 */ 720 __u32 handle; 721 __u32 pad; 722}; 723 724struct drm_i915_gem_pread { 725 /** Handle for the object being read. */ 726 __u32 handle; 727 __u32 pad; 728 /** Offset into the object to read from */ 729 __u64 offset; 730 /** Length of data to read */ 731 __u64 size; 732 /** 733 * Pointer to write the data into. 734 * 735 * This is a fixed-size type for 32/64 compatibility. 736 */ 737 __u64 data_ptr; 738}; 739 740struct drm_i915_gem_pwrite { 741 /** Handle for the object being written to. */ 742 __u32 handle; 743 __u32 pad; 744 /** Offset into the object to write to */ 745 __u64 offset; 746 /** Length of data to write */ 747 __u64 size; 748 /** 749 * Pointer to read the data from. 750 * 751 * This is a fixed-size type for 32/64 compatibility. 752 */ 753 __u64 data_ptr; 754}; 755 756struct drm_i915_gem_mmap { 757 /** Handle for the object being mapped. */ 758 __u32 handle; 759 __u32 pad; 760 /** Offset in the object to map. */ 761 __u64 offset; 762 /** 763 * Length of data to map. 764 * 765 * The value will be page-aligned. 766 */ 767 __u64 size; 768 /** 769 * Returned pointer the data was mapped at. 770 * 771 * This is a fixed-size type for 32/64 compatibility. 772 */ 773 __u64 addr_ptr; 774 775 /** 776 * Flags for extended behaviour. 777 * 778 * Added in version 2. 779 */ 780 __u64 flags; 781#define I915_MMAP_WC 0x1 782}; 783 784struct drm_i915_gem_mmap_gtt { 785 /** Handle for the object being mapped. */ 786 __u32 handle; 787 __u32 pad; 788 /** 789 * Fake offset to use for subsequent mmap call 790 * 791 * This is a fixed-size type for 32/64 compatibility. 792 */ 793 __u64 offset; 794}; 795 796struct drm_i915_gem_set_domain { 797 /** Handle for the object */ 798 __u32 handle; 799 800 /** New read domains */ 801 __u32 read_domains; 802 803 /** New write domain */ 804 __u32 write_domain; 805}; 806 807struct drm_i915_gem_sw_finish { 808 /** Handle for the object */ 809 __u32 handle; 810}; 811 812struct drm_i915_gem_relocation_entry { 813 /** 814 * Handle of the buffer being pointed to by this relocation entry. 815 * 816 * It's appealing to make this be an index into the mm_validate_entry 817 * list to refer to the buffer, but this allows the driver to create 818 * a relocation list for state buffers and not re-write it per 819 * exec using the buffer. 820 */ 821 __u32 target_handle; 822 823 /** 824 * Value to be added to the offset of the target buffer to make up 825 * the relocation entry. 826 */ 827 __u32 delta; 828 829 /** Offset in the buffer the relocation entry will be written into */ 830 __u64 offset; 831 832 /** 833 * Offset value of the target buffer that the relocation entry was last 834 * written as. 835 * 836 * If the buffer has the same offset as last time, we can skip syncing 837 * and writing the relocation. This value is written back out by 838 * the execbuffer ioctl when the relocation is written. 839 */ 840 __u64 presumed_offset; 841 842 /** 843 * Target memory domains read by this operation. 844 */ 845 __u32 read_domains; 846 847 /** 848 * Target memory domains written by this operation. 849 * 850 * Note that only one domain may be written by the whole 851 * execbuffer operation, so that where there are conflicts, 852 * the application will get -EINVAL back. 853 */ 854 __u32 write_domain; 855}; 856 857/** @{ 858 * Intel memory domains 859 * 860 * Most of these just align with the various caches in 861 * the system and are used to flush and invalidate as 862 * objects end up cached in different domains. 863 */ 864/** CPU cache */ 865#define I915_GEM_DOMAIN_CPU 0x00000001 866/** Render cache, used by 2D and 3D drawing */ 867#define I915_GEM_DOMAIN_RENDER 0x00000002 868/** Sampler cache, used by texture engine */ 869#define I915_GEM_DOMAIN_SAMPLER 0x00000004 870/** Command queue, used to load batch buffers */ 871#define I915_GEM_DOMAIN_COMMAND 0x00000008 872/** Instruction cache, used by shader programs */ 873#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 874/** Vertex address cache */ 875#define I915_GEM_DOMAIN_VERTEX 0x00000020 876/** GTT domain - aperture and scanout */ 877#define I915_GEM_DOMAIN_GTT 0x00000040 878/** WC domain - uncached access */ 879#define I915_GEM_DOMAIN_WC 0x00000080 880/** @} */ 881 882struct drm_i915_gem_exec_object { 883 /** 884 * User's handle for a buffer to be bound into the GTT for this 885 * operation. 886 */ 887 __u32 handle; 888 889 /** Number of relocations to be performed on this buffer */ 890 __u32 relocation_count; 891 /** 892 * Pointer to array of struct drm_i915_gem_relocation_entry containing 893 * the relocations to be performed in this buffer. 894 */ 895 __u64 relocs_ptr; 896 897 /** Required alignment in graphics aperture */ 898 __u64 alignment; 899 900 /** 901 * Returned value of the updated offset of the object, for future 902 * presumed_offset writes. 903 */ 904 __u64 offset; 905}; 906 907struct drm_i915_gem_execbuffer { 908 /** 909 * List of buffers to be validated with their relocations to be 910 * performend on them. 911 * 912 * This is a pointer to an array of struct drm_i915_gem_validate_entry. 913 * 914 * These buffers must be listed in an order such that all relocations 915 * a buffer is performing refer to buffers that have already appeared 916 * in the validate list. 917 */ 918 __u64 buffers_ptr; 919 __u32 buffer_count; 920 921 /** Offset in the batchbuffer to start execution from. */ 922 __u32 batch_start_offset; 923 /** Bytes used in batchbuffer from batch_start_offset */ 924 __u32 batch_len; 925 __u32 DR1; 926 __u32 DR4; 927 __u32 num_cliprects; 928 /** This is a struct drm_clip_rect *cliprects */ 929 __u64 cliprects_ptr; 930}; 931 932struct drm_i915_gem_exec_object2 { 933 /** 934 * User's handle for a buffer to be bound into the GTT for this 935 * operation. 936 */ 937 __u32 handle; 938 939 /** Number of relocations to be performed on this buffer */ 940 __u32 relocation_count; 941 /** 942 * Pointer to array of struct drm_i915_gem_relocation_entry containing 943 * the relocations to be performed in this buffer. 944 */ 945 __u64 relocs_ptr; 946 947 /** Required alignment in graphics aperture */ 948 __u64 alignment; 949 950 /** 951 * When the EXEC_OBJECT_PINNED flag is specified this is populated by 952 * the user with the GTT offset at which this object will be pinned. 953 * When the I915_EXEC_NO_RELOC flag is specified this must contain the 954 * presumed_offset of the object. 955 * During execbuffer2 the kernel populates it with the value of the 956 * current GTT offset of the object, for future presumed_offset writes. 957 */ 958 __u64 offset; 959 960#define EXEC_OBJECT_NEEDS_FENCE (1<<0) 961#define EXEC_OBJECT_NEEDS_GTT (1<<1) 962#define EXEC_OBJECT_WRITE (1<<2) 963#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3) 964#define EXEC_OBJECT_PINNED (1<<4) 965#define EXEC_OBJECT_PAD_TO_SIZE (1<<5) 966/* The kernel implicitly tracks GPU activity on all GEM objects, and 967 * synchronises operations with outstanding rendering. This includes 968 * rendering on other devices if exported via dma-buf. However, sometimes 969 * this tracking is too coarse and the user knows better. For example, 970 * if the object is split into non-overlapping ranges shared between different 971 * clients or engines (i.e. suballocating objects), the implicit tracking 972 * by kernel assumes that each operation affects the whole object rather 973 * than an individual range, causing needless synchronisation between clients. 974 * The kernel will also forgo any CPU cache flushes prior to rendering from 975 * the object as the client is expected to be also handling such domain 976 * tracking. 977 * 978 * The kernel maintains the implicit tracking in order to manage resources 979 * used by the GPU - this flag only disables the synchronisation prior to 980 * rendering with this object in this execbuf. 981 * 982 * Opting out of implicit synhronisation requires the user to do its own 983 * explicit tracking to avoid rendering corruption. See, for example, 984 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously. 985 */ 986#define EXEC_OBJECT_ASYNC (1<<6) 987/* Request that the contents of this execobject be copied into the error 988 * state upon a GPU hang involving this batch for post-mortem debugging. 989 * These buffers are recorded in no particular order as "user" in 990 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see 991 * if the kernel supports this flag. 992 */ 993#define EXEC_OBJECT_CAPTURE (1<<7) 994/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */ 995#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1) 996 __u64 flags; 997 998 union { 999 __u64 rsvd1; 1000 __u64 pad_to_size;
1001 }; 1002 __u64 rsvd2; 1003}; 1004 1005struct drm_i915_gem_exec_fence { 1006 /** 1007 * User's handle for a drm_syncobj to wait on or signal. 1008 */ 1009 __u32 handle; 1010 1011#define I915_EXEC_FENCE_WAIT (1<<0) 1012#define I915_EXEC_FENCE_SIGNAL (1<<1) 1013#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1)) 1014 __u32 flags; 1015}; 1016 1017struct drm_i915_gem_execbuffer2 { 1018 /** 1019 * List of gem_exec_object2 structs 1020 */ 1021 __u64 buffers_ptr; 1022 __u32 buffer_count; 1023 1024 /** Offset in the batchbuffer to start execution from. */ 1025 __u32 batch_start_offset; 1026 /** Bytes used in batchbuffer from batch_start_offset */ 1027 __u32 batch_len; 1028 __u32 DR1; 1029 __u32 DR4; 1030 __u32 num_cliprects; 1031 /** 1032 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY 1033 * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a 1034 * struct drm_i915_gem_exec_fence *fences. 1035 */ 1036 __u64 cliprects_ptr; 1037#define I915_EXEC_RING_MASK (0x3f) 1038#define I915_EXEC_DEFAULT (0<<0) 1039#define I915_EXEC_RENDER (1<<0) 1040#define I915_EXEC_BSD (2<<0) 1041#define I915_EXEC_BLT (3<<0) 1042#define I915_EXEC_VEBOX (4<<0) 1043 1044/* Used for switching the constants addressing mode on gen4+ RENDER ring. 1045 * Gen6+ only supports relative addressing to dynamic state (default) and 1046 * absolute addressing. 1047 * 1048 * These flags are ignored for the BSD and BLT rings. 1049 */ 1050#define I915_EXEC_CONSTANTS_MASK (3<<6) 1051#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ 1052#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) 1053#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ 1054 __u64 flags; 1055 __u64 rsvd1; /* now used for context info */ 1056 __u64 rsvd2; 1057}; 1058 1059/** Resets the SO write offset registers for transform feedback on gen7. */ 1060#define I915_EXEC_GEN7_SOL_RESET (1<<8) 1061 1062/** Request a privileged ("secure") batch buffer. Note only available for 1063 * DRM_ROOT_ONLY | DRM_MASTER processes. 1064 */ 1065#define I915_EXEC_SECURE (1<<9) 1066 1067/** Inform the kernel that the batch is and will always be pinned. This 1068 * negates the requirement for a workaround to be performed to avoid 1069 * an incoherent CS (such as can be found on 830/845). If this flag is 1070 * not passed, the kernel will endeavour to make sure the batch is 1071 * coherent with the CS before execution. If this flag is passed, 1072 * userspace assumes the responsibility for ensuring the same. 1073 */ 1074#define I915_EXEC_IS_PINNED (1<<10) 1075 1076/** Provide a hint to the kernel that the command stream and auxiliary 1077 * state buffers already holds the correct presumed addresses and so the 1078 * relocation process may be skipped if no buffers need to be moved in 1079 * preparation for the execbuffer. 1080 */ 1081#define I915_EXEC_NO_RELOC (1<<11) 1082 1083/** Use the reloc.handle as an index into the exec object array rather 1084 * than as the per-file handle. 1085 */ 1086#define I915_EXEC_HANDLE_LUT (1<<12) 1087 1088/** Used for switching BSD rings on the platforms with two BSD rings */ 1089#define I915_EXEC_BSD_SHIFT (13) 1090#define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT) 1091/* default ping-pong mode */ 1092#define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT) 1093#define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT) 1094#define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT) 1095 1096/** Tell the kernel that the batchbuffer is processed by 1097 * the resource streamer. 1098 */ 1099#define I915_EXEC_RESOURCE_STREAMER (1<<15) 1100 1101/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent 1102 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing 1103 * the batch. 1104 * 1105 * Returns -EINVAL if the sync_file fd cannot be found. 1106 */ 1107#define I915_EXEC_FENCE_IN (1<<16) 1108 1109/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd 1110 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given 1111 * to the caller, and it should be close() after use. (The fd is a regular 1112 * file descriptor and will be cleaned up on process termination. It holds 1113 * a reference to the request, but nothing else.) 1114 * 1115 * The sync_file fd can be combined with other sync_file and passed either 1116 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip 1117 * will only occur after this request completes), or to other devices. 1118 * 1119 * Using I915_EXEC_FENCE_OUT requires use of 1120 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written 1121 * back to userspace. Failure to do so will cause the out-fence to always 1122 * be reported as zero, and the real fence fd to be leaked. 1123 */ 1124#define I915_EXEC_FENCE_OUT (1<<17) 1125 1126/* 1127 * Traditionally the execbuf ioctl has only considered the final element in 1128 * the execobject[] to be the executable batch. Often though, the client 1129 * will known the batch object prior to construction and being able to place 1130 * it into the execobject[] array first can simplify the relocation tracking. 1131 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the 1132 * execobject[] as the * batch instead (the default is to use the last 1133 * element). 1134 */ 1135#define I915_EXEC_BATCH_FIRST (1<<18) 1136 1137/* Setting I915_FENCE_ARRAY implies that num_cliprects and cliprects_ptr 1138 * define an array of i915_gem_exec_fence structures which specify a set of 1139 * dma fences to wait upon or signal. 1140 */ 1141#define I915_EXEC_FENCE_ARRAY (1<<19) 1142 1143/* 1144 * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent 1145 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing 1146 * the batch. 1147 * 1148 * Returns -EINVAL if the sync_file fd cannot be found. 1149 */ 1150#define I915_EXEC_FENCE_SUBMIT (1 << 20) 1151 1152#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1)) 1153 1154#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 1155#define i915_execbuffer2_set_context_id(eb2, context) \ 1156 (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK 1157#define i915_execbuffer2_get_context_id(eb2) \ 1158 ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK) 1159 1160struct drm_i915_gem_pin { 1161 /** Handle of the buffer to be pinned. */ 1162 __u32 handle; 1163 __u32 pad; 1164 1165 /** alignment required within the aperture */ 1166 __u64 alignment; 1167 1168 /** Returned GTT offset of the buffer. */ 1169 __u64 offset; 1170}; 1171 1172struct drm_i915_gem_unpin { 1173 /** Handle of the buffer to be unpinned. */ 1174 __u32 handle; 1175 __u32 pad; 1176}; 1177 1178struct drm_i915_gem_busy { 1179 /** Handle of the buffer to check for busy */ 1180 __u32 handle; 1181 1182 /** Return busy status 1183 * 1184 * A return of 0 implies that the object is idle (after 1185 * having flushed any pending activity), and a non-zero return that 1186 * the object is still in-flight on the GPU. (The GPU has not yet 1187 * signaled completion for all pending requests that reference the 1188 * object.) An object is guaranteed to become idle eventually (so 1189 * long as no new GPU commands are executed upon it). Due to the 1190 * asynchronous nature of the hardware, an object reported 1191 * as busy may become idle before the ioctl is completed. 1192 * 1193 * Furthermore, if the object is busy, which engine is busy is only 1194 * provided as a guide and only indirectly by reporting its class 1195 * (there may be more than one engine in each class). There are race 1196 * conditions which prevent the report of which engines are busy from 1197 * being always accurate. However, the converse is not true. If the 1198 * object is idle, the result of the ioctl, that all engines are idle, 1199 * is accurate. 1200 * 1201 * The returned dword is split into two fields to indicate both 1202 * the engine classess on which the object is being read, and the 1203 * engine class on which it is currently being written (if any). 1204 * 1205 * The low word (bits 0:15) indicate if the object is being written 1206 * to by any engine (there can only be one, as the GEM implicit 1207 * synchronisation rules force writes to be serialised). Only the 1208 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as 1209 * 1 not 0 etc) for the last write is reported. 1210 * 1211 * The high word (bits 16:31) are a bitmask of which engines classes 1212 * are currently reading from the object. Multiple engines may be 1213 * reading from the object simultaneously. 1214 * 1215 * The value of each engine class is the same as specified in the 1216 * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e. 1217 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc. 1218 * reported as active itself. Some hardware may have parallel 1219 * execution engines, e.g. multiple media engines, which are 1220 * mapped to the same class identifier and so are not separately 1221 * reported for busyness. 1222 * 1223 * Caveat emptor: 1224 * Only the boolean result of this query is reliable; that is whether 1225 * the object is idle or busy. The report of which engines are busy 1226 * should be only used as a heuristic. 1227 */ 1228 __u32 busy; 1229}; 1230 1231/** 1232 * I915_CACHING_NONE 1233 * 1234 * GPU access is not coherent with cpu caches. Default for machines without an 1235 * LLC. 1236 */ 1237#define I915_CACHING_NONE 0 1238/** 1239 * I915_CACHING_CACHED 1240 * 1241 * GPU access is coherent with cpu caches and furthermore the data is cached in 1242 * last-level caches shared between cpu cores and the gpu GT. Default on 1243 * machines with HAS_LLC. 1244 */ 1245#define I915_CACHING_CACHED 1 1246/** 1247 * I915_CACHING_DISPLAY 1248 * 1249 * Special GPU caching mode which is coherent with the scanout engines. 1250 * Transparently falls back to I915_CACHING_NONE on platforms where no special 1251 * cache mode (like write-through or gfdt flushing) is available. The kernel 1252 * automatically sets this mode when using a buffer as a scanout target. 1253 * Userspace can manually set this mode to avoid a costly stall and clflush in 1254 * the hotpath of drawing the first frame. 1255 */ 1256#define I915_CACHING_DISPLAY 2 1257 1258struct drm_i915_gem_caching { 1259 /** 1260 * Handle of the buffer to set/get the caching level of. */ 1261 __u32 handle; 1262 1263 /** 1264 * Cacheing level to apply or return value 1265 * 1266 * bits0-15 are for generic caching control (i.e. the above defined 1267 * values). bits16-31 are reserved for platform-specific variations 1268 * (e.g. l3$ caching on gen7). */ 1269 __u32 caching; 1270}; 1271 1272#define I915_TILING_NONE 0 1273#define I915_TILING_X 1 1274#define I915_TILING_Y 2 1275#define I915_TILING_LAST I915_TILING_Y 1276 1277#define I915_BIT_6_SWIZZLE_NONE 0 1278#define I915_BIT_6_SWIZZLE_9 1 1279#define I915_BIT_6_SWIZZLE_9_10 2 1280#define I915_BIT_6_SWIZZLE_9_11 3 1281#define I915_BIT_6_SWIZZLE_9_10_11 4 1282/* Not seen by userland */ 1283#define I915_BIT_6_SWIZZLE_UNKNOWN 5 1284/* Seen by userland. */ 1285#define I915_BIT_6_SWIZZLE_9_17 6 1286#define I915_BIT_6_SWIZZLE_9_10_17 7 1287 1288struct drm_i915_gem_set_tiling { 1289 /** Handle of the buffer to have its tiling state updated */ 1290 __u32 handle; 1291 1292 /** 1293 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 1294 * I915_TILING_Y). 1295 * 1296 * This value is to be set on request, and will be updated by the 1297 * kernel on successful return with the actual chosen tiling layout. 1298 * 1299 * The tiling mode may be demoted to I915_TILING_NONE when the system 1300 * has bit 6 swizzling that can't be managed correctly by GEM. 1301 * 1302 * Buffer contents become undefined when changing tiling_mode. 1303 */ 1304 __u32 tiling_mode; 1305 1306 /** 1307 * Stride in bytes for the object when in I915_TILING_X or 1308 * I915_TILING_Y. 1309 */ 1310 __u32 stride; 1311 1312 /** 1313 * Returned address bit 6 swizzling required for CPU access through 1314 * mmap mapping. 1315 */ 1316 __u32 swizzle_mode; 1317}; 1318 1319struct drm_i915_gem_get_tiling { 1320 /** Handle of the buffer to get tiling state for. */ 1321 __u32 handle; 1322 1323 /** 1324 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 1325 * I915_TILING_Y). 1326 */ 1327 __u32 tiling_mode; 1328 1329 /** 1330 * Returned address bit 6 swizzling required for CPU access through 1331 * mmap mapping. 1332 */ 1333 __u32 swizzle_mode; 1334 1335 /** 1336 * Returned address bit 6 swizzling required for CPU access through 1337 * mmap mapping whilst bound. 1338 */ 1339 __u32 phys_swizzle_mode; 1340}; 1341 1342struct drm_i915_gem_get_aperture { 1343 /** Total size of the aperture used by i915_gem_execbuffer, in bytes */ 1344 __u64 aper_size; 1345 1346 /** 1347 * Available space in the aperture used by i915_gem_execbuffer, in 1348 * bytes 1349 */ 1350 __u64 aper_available_size; 1351}; 1352 1353struct drm_i915_get_pipe_from_crtc_id { 1354 /** ID of CRTC being requested **/ 1355 __u32 crtc_id; 1356 1357 /** pipe of requested CRTC **/ 1358 __u32 pipe; 1359}; 1360 1361#define I915_MADV_WILLNEED 0 1362#define I915_MADV_DONTNEED 1 1363#define __I915_MADV_PURGED 2 /* internal state */ 1364 1365struct drm_i915_gem_madvise { 1366 /** Handle of the buffer to change the backing store advice */ 1367 __u32 handle; 1368 1369 /* Advice: either the buffer will be needed again in the near future, 1370 * or wont be and could be discarded under memory pressure. 1371 */ 1372 __u32 madv; 1373 1374 /** Whether the backing store still exists. */ 1375 __u32 retained; 1376}; 1377 1378/* flags */ 1379#define I915_OVERLAY_TYPE_MASK 0xff 1380#define I915_OVERLAY_YUV_PLANAR 0x01 1381#define I915_OVERLAY_YUV_PACKED 0x02 1382#define I915_OVERLAY_RGB 0x03 1383 1384#define I915_OVERLAY_DEPTH_MASK 0xff00 1385#define I915_OVERLAY_RGB24 0x1000 1386#define I915_OVERLAY_RGB16 0x2000 1387#define I915_OVERLAY_RGB15 0x3000 1388#define I915_OVERLAY_YUV422 0x0100 1389#define I915_OVERLAY_YUV411 0x0200 1390#define I915_OVERLAY_YUV420 0x0300 1391#define I915_OVERLAY_YUV410 0x0400 1392 1393#define I915_OVERLAY_SWAP_MASK 0xff0000 1394#define I915_OVERLAY_NO_SWAP 0x000000 1395#define I915_OVERLAY_UV_SWAP 0x010000 1396#define I915_OVERLAY_Y_SWAP 0x020000 1397#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000 1398 1399#define I915_OVERLAY_FLAGS_MASK 0xff000000 1400#define I915_OVERLAY_ENABLE 0x01000000 1401 1402struct drm_intel_overlay_put_image { 1403 /* various flags and src format description */ 1404 __u32 flags; 1405 /* source picture description */ 1406 __u32 bo_handle; 1407 /* stride values and offsets are in bytes, buffer relative */ 1408 __u16 stride_Y; /* stride for packed formats */ 1409 __u16 stride_UV; 1410 __u32 offset_Y; /* offset for packet formats */ 1411 __u32 offset_U; 1412 __u32 offset_V; 1413 /* in pixels */ 1414 __u16 src_width; 1415 __u16 src_height; 1416 /* to compensate the scaling factors for partially covered surfaces */ 1417 __u16 src_scan_width; 1418 __u16 src_scan_height; 1419 /* output crtc description */ 1420 __u32 crtc_id; 1421 __u16 dst_x; 1422 __u16 dst_y; 1423 __u16 dst_width; 1424 __u16 dst_height; 1425}; 1426 1427/* flags */ 1428#define I915_OVERLAY_UPDATE_ATTRS (1<<0) 1429#define I915_OVERLAY_UPDATE_GAMMA (1<<1) 1430#define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2) 1431struct drm_intel_overlay_attrs { 1432 __u32 flags; 1433 __u32 color_key; 1434 __s32 brightness; 1435 __u32 contrast; 1436 __u32 saturation; 1437 __u32 gamma0; 1438 __u32 gamma1; 1439 __u32 gamma2; 1440 __u32 gamma3; 1441 __u32 gamma4; 1442 __u32 gamma5; 1443}; 1444 1445/* 1446 * Intel sprite handling 1447 * 1448 * Color keying works with a min/mask/max tuple. Both source and destination 1449 * color keying is allowed. 1450 * 1451 * Source keying: 1452 * Sprite pixels within the min & max values, masked against the color channels 1453 * specified in the mask field, will be transparent. All other pixels will 1454 * be displayed on top of the primary plane. For RGB surfaces, only the min 1455 * and mask fields will be used; ranged compares are not allowed. 1456 * 1457 * Destination keying: 1458 * Primary plane pixels that match the min value, masked against the color 1459 * channels specified in the mask field, will be replaced by corresponding 1460 * pixels from the sprite plane. 1461 * 1462 * Note that source & destination keying are exclusive; only one can be 1463 * active on a given plane. 1464 */ 1465 1466#define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set 1467 * flags==0 to disable colorkeying. 1468 */ 1469#define I915_SET_COLORKEY_DESTINATION (1<<1) 1470#define I915_SET_COLORKEY_SOURCE (1<<2) 1471struct drm_intel_sprite_colorkey { 1472 __u32 plane_id; 1473 __u32 min_value; 1474 __u32 channel_mask; 1475 __u32 max_value; 1476 __u32 flags; 1477}; 1478 1479struct drm_i915_gem_wait { 1480 /** Handle of BO we shall wait on */ 1481 __u32 bo_handle; 1482 __u32 flags; 1483 /** Number of nanoseconds to wait, Returns time remaining. */ 1484 __s64 timeout_ns; 1485}; 1486 1487struct drm_i915_gem_context_create { 1488 __u32 ctx_id; /* output: id of new context*/ 1489 __u32 pad; 1490}; 1491 1492struct drm_i915_gem_context_create_ext { 1493 __u32 ctx_id; /* output: id of new context*/ 1494 __u32 flags; 1495#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0) 1496#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1) 1497#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \ 1498 (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1)) 1499 __u64 extensions; 1500}; 1501 1502struct drm_i915_gem_context_param { 1503 __u32 ctx_id; 1504 __u32 size; 1505 __u64 param; 1506#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 1507#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 1508#define I915_CONTEXT_PARAM_GTT_SIZE 0x3 1509#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 1510#define I915_CONTEXT_PARAM_BANNABLE 0x5 1511#define I915_CONTEXT_PARAM_PRIORITY 0x6 1512#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */ 1513#define I915_CONTEXT_DEFAULT_PRIORITY 0 1514#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */ 1515 /* 1516 * When using the following param, value should be a pointer to 1517 * drm_i915_gem_context_param_sseu. 1518 */ 1519#define I915_CONTEXT_PARAM_SSEU 0x7 1520 1521/* 1522 * Not all clients may want to attempt automatic recover of a context after 1523 * a hang (for example, some clients may only submit very small incremental 1524 * batches relying on known logical state of previous batches which will never 1525 * recover correctly and each attempt will hang), and so would prefer that 1526 * the context is forever banned instead. 1527 * 1528 * If set to false (0), after a reset, subsequent (and in flight) rendering 1529 * from this context is discarded, and the client will need to create a new 1530 * context to use instead. 1531 * 1532 * If set to true (1), the kernel will automatically attempt to recover the 1533 * context by skipping the hanging batch and executing the next batch starting 1534 * from the default context state (discarding the incomplete logical context 1535 * state lost due to the reset). 1536 * 1537 * On creation, all new contexts are marked as recoverable. 1538 */ 1539#define I915_CONTEXT_PARAM_RECOVERABLE 0x8 1540 1541 /* 1542 * The id of the associated virtual memory address space (ppGTT) of 1543 * this context. Can be retrieved and passed to another context 1544 * (on the same fd) for both to use the same ppGTT and so share 1545 * address layouts, and avoid reloading the page tables on context 1546 * switches between themselves. 1547 * 1548 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY. 1549 */ 1550#define I915_CONTEXT_PARAM_VM 0x9 1551 1552/* 1553 * I915_CONTEXT_PARAM_ENGINES: 1554 * 1555 * Bind this context to operate on this subset of available engines. Henceforth, 1556 * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as 1557 * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0] 1558 * and upwards. Slots 0...N are filled in using the specified (class, instance). 1559 * Use 1560 * engine_class: I915_ENGINE_CLASS_INVALID, 1561 * engine_instance: I915_ENGINE_CLASS_INVALID_NONE 1562 * to specify a gap in the array that can be filled in later, e.g. by a 1563 * virtual engine used for load balancing. 1564 * 1565 * Setting the number of engines bound to the context to 0, by passing a zero 1566 * sized argument, will revert back to default settings. 1567 * 1568 * See struct i915_context_param_engines. 1569 * 1570 * Extensions: 1571 * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE) 1572 * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND) 1573 */ 1574#define I915_CONTEXT_PARAM_ENGINES 0xa 1575 1576/* 1577 * I915_CONTEXT_PARAM_PERSISTENCE: 1578 * 1579 * Allow the context and active rendering to survive the process until 1580 * completion. Persistence allows fire-and-forget clients to queue up a 1581 * bunch of work, hand the output over to a display server and then quit. 1582 * If the context is marked as not persistent, upon closing (either via 1583 * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure 1584 * or process termination), the context and any outstanding requests will be 1585 * cancelled (and exported fences for cancelled requests marked as -EIO). 1586 * 1587 * By default, new contexts allow persistence. 1588 */ 1589#define I915_CONTEXT_PARAM_PERSISTENCE 0xb 1590/* Must be kept compact -- no holes and well documented */ 1591 1592 __u64 value; 1593}; 1594 1595/** 1596 * Context SSEU programming 1597 * 1598 * It may be necessary for either functional or performance reason to configure 1599 * a context to run with a reduced number of SSEU (where SSEU stands for Slice/ 1600 * Sub-slice/EU). 1601 * 1602 * This is done by configuring SSEU configuration using the below 1603 * @struct drm_i915_gem_context_param_sseu for every supported engine which 1604 * userspace intends to use. 1605 * 1606 * Not all GPUs or engines support this functionality in which case an error 1607 * code -ENODEV will be returned. 1608 * 1609 * Also, flexibility of possible SSEU configuration permutations varies between 1610 * GPU generations and software imposed limitations. Requesting such a 1611 * combination will return an error code of -EINVAL. 1612 * 1613 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in 1614 * favour of a single global setting. 1615 */ 1616struct drm_i915_gem_context_param_sseu { 1617 /* 1618 * Engine class & instance to be configured or queried. 1619 */ 1620 struct i915_engine_class_instance engine; 1621 1622 /* 1623 * Unknown flags must be cleared to zero. 1624 */ 1625 __u32 flags; 1626#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0) 1627 1628 /* 1629 * Mask of slices to enable for the context. Valid values are a subset 1630 * of the bitmask value returned for I915_PARAM_SLICE_MASK. 1631 */ 1632 __u64 slice_mask; 1633 1634 /* 1635 * Mask of subslices to enable for the context. Valid values are a 1636 * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK. 1637 */ 1638 __u64 subslice_mask; 1639 1640 /* 1641 * Minimum/Maximum number of EUs to enable per subslice for the 1642 * context. min_eus_per_subslice must be inferior or equal to 1643 * max_eus_per_subslice. 1644 */ 1645 __u16 min_eus_per_subslice; 1646 __u16 max_eus_per_subslice; 1647 1648 /* 1649 * Unused for now. Must be cleared to zero. 1650 */ 1651 __u32 rsvd; 1652}; 1653 1654/* 1655 * i915_context_engines_load_balance: 1656 * 1657 * Enable load balancing across this set of engines. 1658 * 1659 * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when 1660 * used will proxy the execbuffer request onto one of the set of engines 1661 * in such a way as to distribute the load evenly across the set. 1662 * 1663 * The set of engines must be compatible (e.g. the same HW class) as they 1664 * will share the same logical GPU context and ring. 1665 * 1666 * To intermix rendering with the virtual engine and direct rendering onto 1667 * the backing engines (bypassing the load balancing proxy), the context must 1668 * be defined to use a single timeline for all engines. 1669 */ 1670struct i915_context_engines_load_balance { 1671 struct i915_user_extension base; 1672 1673 __u16 engine_index; 1674 __u16 num_siblings; 1675 __u32 flags; /* all undefined flags must be zero */ 1676 1677 __u64 mbz64; /* reserved for future use; must be zero */ 1678 1679 struct i915_engine_class_instance engines[0]; 1680} __attribute__((packed)); 1681 1682#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \ 1683 struct i915_user_extension base; \ 1684 __u16 engine_index; \ 1685 __u16 num_siblings; \ 1686 __u32 flags; \ 1687 __u64 mbz64; \ 1688 struct i915_engine_class_instance engines[N__]; \ 1689} __attribute__((packed)) name__ 1690 1691/* 1692 * i915_context_engines_bond: 1693 * 1694 * Constructed bonded pairs for execution within a virtual engine. 1695 * 1696 * All engines are equal, but some are more equal than others. Given 1697 * the distribution of resources in the HW, it may be preferable to run 1698 * a request on a given subset of engines in parallel to a request on a 1699 * specific engine. We enable this selection of engines within a virtual 1700 * engine by specifying bonding pairs, for any given master engine we will 1701 * only execute on one of the corresponding siblings within the virtual engine. 1702 * 1703 * To execute a request in parallel on the master engine and a sibling requires 1704 * coordination with a I915_EXEC_FENCE_SUBMIT. 1705 */ 1706struct i915_context_engines_bond { 1707 struct i915_user_extension base; 1708 1709 struct i915_engine_class_instance master; 1710 1711 __u16 virtual_index; /* index of virtual engine in ctx->engines[] */ 1712 __u16 num_bonds; 1713 1714 __u64 flags; /* all undefined flags must be zero */ 1715 __u64 mbz64[4]; /* reserved for future use; must be zero */ 1716 1717 struct i915_engine_class_instance engines[0]; 1718} __attribute__((packed)); 1719 1720#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \ 1721 struct i915_user_extension base; \ 1722 struct i915_engine_class_instance master; \ 1723 __u16 virtual_index; \ 1724 __u16 num_bonds; \ 1725 __u64 flags; \ 1726 __u64 mbz64[4]; \ 1727 struct i915_engine_class_instance engines[N__]; \ 1728} __attribute__((packed)) name__ 1729 1730struct i915_context_param_engines { 1731 __u64 extensions; /* linked chain of extension blocks, 0 terminates */ 1732#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */ 1733#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */ 1734 struct i915_engine_class_instance engines[0]; 1735} __attribute__((packed)); 1736 1737#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \ 1738 __u64 extensions; \ 1739 struct i915_engine_class_instance engines[N__]; \ 1740} __attribute__((packed)) name__ 1741 1742struct drm_i915_gem_context_create_ext_setparam { 1743#define I915_CONTEXT_CREATE_EXT_SETPARAM 0 1744 struct i915_user_extension base; 1745 struct drm_i915_gem_context_param param; 1746}; 1747 1748struct drm_i915_gem_context_create_ext_clone { 1749#define I915_CONTEXT_CREATE_EXT_CLONE 1 1750 struct i915_user_extension base; 1751 __u32 clone_id; 1752 __u32 flags; 1753#define I915_CONTEXT_CLONE_ENGINES (1u << 0) 1754#define I915_CONTEXT_CLONE_FLAGS (1u << 1) 1755#define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2) 1756#define I915_CONTEXT_CLONE_SSEU (1u << 3) 1757#define I915_CONTEXT_CLONE_TIMELINE (1u << 4) 1758#define I915_CONTEXT_CLONE_VM (1u << 5) 1759#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1) 1760 __u64 rsvd; 1761}; 1762 1763struct drm_i915_gem_context_destroy { 1764 __u32 ctx_id; 1765 __u32 pad; 1766}; 1767 1768/* 1769 * DRM_I915_GEM_VM_CREATE - 1770 * 1771 * Create a new virtual memory address space (ppGTT) for use within a context 1772 * on the same file. Extensions can be provided to configure exactly how the 1773 * address space is setup upon creation. 1774 * 1775 * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is 1776 * returned in the outparam @id. 1777 * 1778 * No flags are defined, with all bits reserved and must be zero. 1779 * 1780 * An extension chain maybe provided, starting with @extensions, and terminated 1781 * by the @next_extension being 0. Currently, no extensions are defined. 1782 * 1783 * DRM_I915_GEM_VM_DESTROY - 1784 * 1785 * Destroys a previously created VM id, specified in @id. 1786 * 1787 * No extensions or flags are allowed currently, and so must be zero. 1788 */ 1789struct drm_i915_gem_vm_control { 1790 __u64 extensions; 1791 __u32 flags; 1792 __u32 vm_id; 1793}; 1794 1795struct drm_i915_reg_read { 1796 /* 1797 * Register offset. 1798 * For 64bit wide registers where the upper 32bits don't immediately 1799 * follow the lower 32bits, the offset of the lower 32bits must 1800 * be specified 1801 */ 1802 __u64 offset; 1803#define I915_REG_READ_8B_WA (1ul << 0) 1804 1805 __u64 val; /* Return value */ 1806}; 1807 1808/* Known registers: 1809 * 1810 * Render engine timestamp - 0x2358 + 64bit - gen7+ 1811 * - Note this register returns an invalid value if using the default 1812 * single instruction 8byte read, in order to workaround that pass 1813 * flag I915_REG_READ_8B_WA in offset field. 1814 * 1815 */ 1816 1817struct drm_i915_reset_stats { 1818 __u32 ctx_id; 1819 __u32 flags; 1820 1821 /* All resets since boot/module reload, for all contexts */ 1822 __u32 reset_count; 1823 1824 /* Number of batches lost when active in GPU, for this context */ 1825 __u32 batch_active; 1826 1827 /* Number of batches lost pending for execution, for this context */ 1828 __u32 batch_pending; 1829 1830 __u32 pad; 1831}; 1832 1833struct drm_i915_gem_userptr { 1834 __u64 user_ptr; 1835 __u64 user_size; 1836 __u32 flags; 1837#define I915_USERPTR_READ_ONLY 0x1 1838#define I915_USERPTR_UNSYNCHRONIZED 0x80000000 1839 /** 1840 * Returned handle for the object. 1841 * 1842 * Object handles are nonzero. 1843 */ 1844 __u32 handle; 1845}; 1846 1847enum drm_i915_oa_format { 1848 I915_OA_FORMAT_A13 = 1, /* HSW only */ 1849 I915_OA_FORMAT_A29, /* HSW only */ 1850 I915_OA_FORMAT_A13_B8_C8, /* HSW only */ 1851 I915_OA_FORMAT_B4_C8, /* HSW only */ 1852 I915_OA_FORMAT_A45_B8_C8, /* HSW only */ 1853 I915_OA_FORMAT_B4_C8_A16, /* HSW only */ 1854 I915_OA_FORMAT_C4_B8, /* HSW+ */ 1855 1856 /* Gen8+ */ 1857 I915_OA_FORMAT_A12, 1858 I915_OA_FORMAT_A12_B8_C8, 1859 I915_OA_FORMAT_A32u40_A4u32_B8_C8, 1860 1861 I915_OA_FORMAT_MAX /* non-ABI */ 1862}; 1863 1864enum drm_i915_perf_property_id { 1865 /** 1866 * Open the stream for a specific context handle (as used with 1867 * execbuffer2). A stream opened for a specific context this way 1868 * won't typically require root privileges. 1869 * 1870 * This property is available in perf revision 1. 1871 */ 1872 DRM_I915_PERF_PROP_CTX_HANDLE = 1, 1873 1874 /** 1875 * A value of 1 requests the inclusion of raw OA unit reports as 1876 * part of stream samples. 1877 * 1878 * This property is available in perf revision 1. 1879 */ 1880 DRM_I915_PERF_PROP_SAMPLE_OA, 1881 1882 /** 1883 * The value specifies which set of OA unit metrics should be 1884 * be configured, defining the contents of any OA unit reports. 1885 * 1886 * This property is available in perf revision 1. 1887 */ 1888 DRM_I915_PERF_PROP_OA_METRICS_SET, 1889 1890 /** 1891 * The value specifies the size and layout of OA unit reports. 1892 * 1893 * This property is available in perf revision 1. 1894 */ 1895 DRM_I915_PERF_PROP_OA_FORMAT, 1896 1897 /** 1898 * Specifying this property implicitly requests periodic OA unit 1899 * sampling and (at least on Haswell) the sampling frequency is derived 1900 * from this exponent as follows: 1901 * 1902 * 80ns * 2^(period_exponent + 1) 1903 * 1904 * This property is available in perf revision 1. 1905 */ 1906 DRM_I915_PERF_PROP_OA_EXPONENT, 1907 1908 /** 1909 * Specifying this property is only valid when specify a context to 1910 * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property 1911 * will hold preemption of the particular context we want to gather 1912 * performance data about. The execbuf2 submissions must include a 1913 * drm_i915_gem_execbuffer_ext_perf parameter for this to apply. 1914 * 1915 * This property is available in perf revision 3. 1916 */ 1917 DRM_I915_PERF_PROP_HOLD_PREEMPTION, 1918 1919 DRM_I915_PERF_PROP_MAX /* non-ABI */ 1920}; 1921 1922struct drm_i915_perf_open_param { 1923 __u32 flags; 1924#define I915_PERF_FLAG_FD_CLOEXEC (1<<0) 1925#define I915_PERF_FLAG_FD_NONBLOCK (1<<1) 1926#define I915_PERF_FLAG_DISABLED (1<<2) 1927 1928 /** The number of u64 (id, value) pairs */ 1929 __u32 num_properties; 1930 1931 /** 1932 * Pointer to array of u64 (id, value) pairs configuring the stream 1933 * to open. 1934 */ 1935 __u64 properties_ptr; 1936}; 1937 1938/** 1939 * Enable data capture for a stream that was either opened in a disabled state 1940 * via I915_PERF_FLAG_DISABLED or was later disabled via 1941 * I915_PERF_IOCTL_DISABLE. 1942 * 1943 * It is intended to be cheaper to disable and enable a stream than it may be 1944 * to close and re-open a stream with the same configuration. 1945 * 1946 * It's undefined whether any pending data for the stream will be lost. 1947 * 1948 * This ioctl is available in perf revision 1. 1949 */ 1950#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0) 1951 1952/** 1953 * Disable data capture for a stream. 1954 * 1955 * It is an error to try and read a stream that is disabled. 1956 * 1957 * This ioctl is available in perf revision 1. 1958 */ 1959#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1) 1960 1961/** 1962 * Change metrics_set captured by a stream. 1963 * 1964 * If the stream is bound to a specific context, the configuration change 1965 * will performed inline with that context such that it takes effect before 1966 * the next execbuf submission. 1967 * 1968 * Returns the previously bound metrics set id, or a negative error code. 1969 * 1970 * This ioctl is available in perf revision 2. 1971 */ 1972#define I915_PERF_IOCTL_CONFIG _IO('i', 0x2) 1973 1974/** 1975 * Common to all i915 perf records 1976 */ 1977struct drm_i915_perf_record_header { 1978 __u32 type; 1979 __u16 pad; 1980 __u16 size; 1981}; 1982 1983enum drm_i915_perf_record_type { 1984 1985 /** 1986 * Samples are the work horse record type whose contents are extensible 1987 * and defined when opening an i915 perf stream based on the given 1988 * properties. 1989 * 1990 * Boolean properties following the naming convention 1991 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in 1992 * every sample. 1993 * 1994 * The order of these sample properties given by userspace has no 1995 * affect on the ordering of data within a sample. The order is 1996 * documented here. 1997 * 1998 * struct { 1999 * struct drm_i915_perf_record_header header; 2000 *
2001 * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA 2002 * }; 2003 */ 2004 DRM_I915_PERF_RECORD_SAMPLE = 1, 2005 2006 /* 2007 * Indicates that one or more OA reports were not written by the 2008 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT 2009 * command collides with periodic sampling - which would be more likely 2010 * at higher sampling frequencies. 2011 */ 2012 DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2, 2013 2014 /** 2015 * An error occurred that resulted in all pending OA reports being lost. 2016 */ 2017 DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3, 2018 2019 DRM_I915_PERF_RECORD_MAX /* non-ABI */ 2020}; 2021 2022/** 2023 * Structure to upload perf dynamic configuration into the kernel. 2024 */ 2025struct drm_i915_perf_oa_config { 2026 /** String formatted like "%08x-%04x-%04x-%04x-%012x" */ 2027 char uuid[36]; 2028 2029 __u32 n_mux_regs; 2030 __u32 n_boolean_regs; 2031 __u32 n_flex_regs; 2032 2033 /* 2034 * These fields are pointers to tuples of u32 values (register address, 2035 * value). For example the expected length of the buffer pointed by 2036 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs). 2037 */ 2038 __u64 mux_regs_ptr; 2039 __u64 boolean_regs_ptr; 2040 __u64 flex_regs_ptr; 2041}; 2042 2043struct drm_i915_query_item { 2044 __u64 query_id; 2045#define DRM_I915_QUERY_TOPOLOGY_INFO 1 2046#define DRM_I915_QUERY_ENGINE_INFO 2 2047#define DRM_I915_QUERY_PERF_CONFIG 3 2048/* Must be kept compact -- no holes and well documented */ 2049 2050 /* 2051 * When set to zero by userspace, this is filled with the size of the 2052 * data to be written at the data_ptr pointer. The kernel sets this 2053 * value to a negative value to signal an error on a particular query 2054 * item. 2055 */ 2056 __s32 length; 2057 2058 /* 2059 * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0. 2060 * 2061 * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the 2062 * following : 2063 * - DRM_I915_QUERY_PERF_CONFIG_LIST 2064 * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2065 * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID 2066 */ 2067 __u32 flags; 2068#define DRM_I915_QUERY_PERF_CONFIG_LIST 1 2069#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2 2070#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3 2071 2072 /* 2073 * Data will be written at the location pointed by data_ptr when the 2074 * value of length matches the length of the data to be written by the 2075 * kernel. 2076 */ 2077 __u64 data_ptr; 2078}; 2079 2080struct drm_i915_query { 2081 __u32 num_items; 2082 2083 /* 2084 * Unused for now. Must be cleared to zero. 2085 */ 2086 __u32 flags; 2087 2088 /* 2089 * This points to an array of num_items drm_i915_query_item structures. 2090 */ 2091 __u64 items_ptr; 2092}; 2093 2094/* 2095 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO : 2096 * 2097 * data: contains the 3 pieces of information : 2098 * 2099 * - the slice mask with one bit per slice telling whether a slice is 2100 * available. The availability of slice X can be queried with the following 2101 * formula : 2102 * 2103 * (data[X / 8] >> (X % 8)) & 1 2104 * 2105 * - the subslice mask for each slice with one bit per subslice telling 2106 * whether a subslice is available. Gen12 has dual-subslices, which are 2107 * similar to two gen11 subslices. For gen12, this array represents dual- 2108 * subslices. The availability of subslice Y in slice X can be queried 2109 * with the following formula : 2110 * 2111 * (data[subslice_offset + 2112 * X * subslice_stride + 2113 * Y / 8] >> (Y % 8)) & 1 2114 * 2115 * - the EU mask for each subslice in each slice with one bit per EU telling 2116 * whether an EU is available. The availability of EU Z in subslice Y in 2117 * slice X can be queried with the following formula : 2118 * 2119 * (data[eu_offset + 2120 * (X * max_subslices + Y) * eu_stride + 2121 * Z / 8] >> (Z % 8)) & 1 2122 */ 2123struct drm_i915_query_topology_info { 2124 /* 2125 * Unused for now. Must be cleared to zero. 2126 */ 2127 __u16 flags; 2128 2129 __u16 max_slices; 2130 __u16 max_subslices; 2131 __u16 max_eus_per_subslice; 2132 2133 /* 2134 * Offset in data[] at which the subslice masks are stored. 2135 */ 2136 __u16 subslice_offset; 2137 2138 /* 2139 * Stride at which each of the subslice masks for each slice are 2140 * stored. 2141 */ 2142 __u16 subslice_stride; 2143 2144 /* 2145 * Offset in data[] at which the EU masks are stored. 2146 */ 2147 __u16 eu_offset; 2148 2149 /* 2150 * Stride at which each of the EU masks for each subslice are stored. 2151 */ 2152 __u16 eu_stride; 2153 2154 __u8 data[]; 2155}; 2156 2157/** 2158 * struct drm_i915_engine_info 2159 * 2160 * Describes one engine and it's capabilities as known to the driver. 2161 */ 2162struct drm_i915_engine_info { 2163 /** Engine class and instance. */ 2164 struct i915_engine_class_instance engine; 2165 2166 /** Reserved field. */ 2167 __u32 rsvd0; 2168 2169 /** Engine flags. */ 2170 __u64 flags; 2171 2172 /** Capabilities of this engine. */ 2173 __u64 capabilities; 2174#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0) 2175#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1) 2176 2177 /** Reserved fields. */ 2178 __u64 rsvd1[4]; 2179}; 2180 2181/** 2182 * struct drm_i915_query_engine_info 2183 * 2184 * Engine info query enumerates all engines known to the driver by filling in 2185 * an array of struct drm_i915_engine_info structures. 2186 */ 2187struct drm_i915_query_engine_info { 2188 /** Number of struct drm_i915_engine_info structs following. */ 2189 __u32 num_engines; 2190 2191 /** MBZ */ 2192 __u32 rsvd[3]; 2193 2194 /** Marker for drm_i915_engine_info structures. */ 2195 struct drm_i915_engine_info engines[]; 2196}; 2197 2198/* 2199 * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG. 2200 */ 2201struct drm_i915_query_perf_config { 2202 union { 2203 /* 2204 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets 2205 * this fields to the number of configurations available. 2206 */ 2207 __u64 n_configs; 2208 2209 /* 2210 * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, 2211 * i915 will use the value in this field as configuration 2212 * identifier to decide what data to write into config_ptr. 2213 */ 2214 __u64 config; 2215 2216 /* 2217 * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, 2218 * i915 will use the value in this field as configuration 2219 * identifier to decide what data to write into config_ptr. 2220 * 2221 * String formatted like "%08x-%04x-%04x-%04x-%012x" 2222 */ 2223 char uuid[36]; 2224 }; 2225 2226 /* 2227 * Unused for now. Must be cleared to zero. 2228 */ 2229 __u32 flags; 2230 2231 /* 2232 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will 2233 * write an array of __u64 of configuration identifiers. 2234 * 2235 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will 2236 * write a struct drm_i915_perf_oa_config. If the following fields of 2237 * drm_i915_perf_oa_config are set not set to 0, i915 will write into 2238 * the associated pointers the values of submitted when the 2239 * configuration was created : 2240 * 2241 * - n_mux_regs 2242 * - n_boolean_regs 2243 * - n_flex_regs 2244 */ 2245 __u8 data[]; 2246}; 2247 2248#if defined(__cplusplus) 2249} 2250#endif 2251 2252#endif /* _UAPI_I915_DRM_H_ */ 2253