1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30#ifndef _I915_DRV_H_
31#define _I915_DRV_H_
32
33#include <uapi/drm/i915_drm.h>
34#include <uapi/drm/drm_fourcc.h>
35
36#include <linux/io-mapping.h>
37#include <linux/i2c.h>
38#include <linux/i2c-algo-bit.h>
39#include <linux/backlight.h>
40#include <linux/hash.h>
41#include <linux/intel-iommu.h>
42#include <linux/kref.h>
43#include <linux/perf_event.h>
44#include <linux/pm_qos.h>
45#include <linux/reservation.h>
46#include <linux/shmem_fs.h>
47
48#include <drm/drmP.h>
49#include <drm/intel-gtt.h>
50#include <drm/drm_legacy.h>
51#include <drm/drm_gem.h>
52#include <drm/drm_auth.h>
53#include <drm/drm_cache.h>
54
55#include "i915_params.h"
56#include "i915_reg.h"
57#include "i915_utils.h"
58
59#include "intel_bios.h"
60#include "intel_device_info.h"
61#include "intel_display.h"
62#include "intel_dpll_mgr.h"
63#include "intel_lrc.h"
64#include "intel_opregion.h"
65#include "intel_ringbuffer.h"
66#include "intel_uncore.h"
67#include "intel_uc.h"
68
69#include "i915_gem.h"
70#include "i915_gem_context.h"
71#include "i915_gem_fence_reg.h"
72#include "i915_gem_object.h"
73#include "i915_gem_gtt.h"
74#include "i915_gem_timeline.h"
75
76#include "i915_request.h"
77#include "i915_vma.h"
78
79#include "intel_gvt.h"
80
81
82
83
84#define DRIVER_NAME "i915"
85#define DRIVER_DESC "Intel Graphics"
86#define DRIVER_DATE "20180308"
87#define DRIVER_TIMESTAMP 1520513379
88
89
90
91
92
93
94
95
/*
 * Report a driver/hardware state inconsistency.  Evaluates to the boolean
 * @condition so it can be used inside an if () like WARN_ON().  Depending on
 * the i915.verbose_state_checks module parameter the report is either a full
 * WARN with backtrace or a plain DRM_ERROR.
 */
#define I915_STATE_WARN(condition, format...) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) \
		if (!WARN(i915_modparams.verbose_state_checks, format)) \
			DRM_ERROR(format); \
	unlikely(__ret_warn_on); \
})

/* State warning that prints the stringified expression itself. */
#define I915_STATE_WARN_ON(x) \
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")

/*
 * Driver-load fault injection: on CONFIG_DRM_I915_DEBUG builds each call
 * site may be forced to report a failure (for testing the error paths of
 * driver initialisation); otherwise this compiles away to false.
 */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)
#else
#define i915_inject_load_failure() false
#endif
114
/*
 * Unsigned 16.16 fixed point number, used by the watermark calculation
 * code.  Wrapped in a struct so fixed point values cannot accidentally be
 * mixed with plain integers.
 */
typedef struct {
	uint32_t val;
} uint_fixed_16_16_t;

/* Largest representable 16.16 value (just under 65536.0). */
#define FP_16_16_MAX ({ \
	uint_fixed_16_16_t fp; \
	fp.val = UINT_MAX; \
	fp; \
})

/* Return true iff @val represents exactly 0.0. */
static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
{
	return val.val == 0;
}
131
/*
 * Convert an integer to 16.16 fixed point.  @val must fit in 16 bits;
 * larger values overflow the integer part and trigger a WARN before the
 * (truncated) shift result is returned.
 */
static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
{
	uint_fixed_16_16_t fp;

	WARN_ON(val > U16_MAX);

	fp.val = val << 16;
	return fp;
}
141
/* Convert 16.16 fixed point to an integer, rounding the fraction up. */
static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
{
	return DIV_ROUND_UP(fp.val, 1 << 16);
}
146
/* Convert 16.16 fixed point to an integer, truncating the fraction. */
static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
{
	return fp.val >> 16;
}
151
152static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
153 uint_fixed_16_16_t min2)
154{
155 uint_fixed_16_16_t min;
156
157 min.val = min(min1.val, min2.val);
158 return min;
159}
160
161static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
162 uint_fixed_16_16_t max2)
163{
164 uint_fixed_16_16_t max;
165
166 max.val = max(max1.val, max2.val);
167 return max;
168}
169
/*
 * Narrow a 64 bit intermediate result back to 16.16 fixed point.  A value
 * that does not fit in 32 bits indicates an overflow in the caller's
 * arithmetic and triggers a WARN before being truncated.
 */
static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
{
	uint_fixed_16_16_t fp;
	WARN_ON(val > U32_MAX);
	fp.val = (uint32_t) val;
	return fp;
}
177
/*
 * Divide two 16.16 fixed point values, returning the integer quotient
 * rounded up (the 16.16 scale factors cancel out).
 */
static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
					    uint_fixed_16_16_t d)
{
	return DIV_ROUND_UP(val.val, d.val);
}
183
/*
 * Multiply an integer by a 16.16 fixed point value and return the integer
 * result rounded up.  The 64 bit intermediate avoids overflow during the
 * multiply; a result that does not fit in 32 bits triggers a WARN.
 */
static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
						uint_fixed_16_16_t mul)
{
	uint64_t intermediate_val;

	intermediate_val = (uint64_t) val * mul.val;
	intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
	WARN_ON(intermediate_val > U32_MAX);
	return (uint32_t) intermediate_val;
}
194
/*
 * Multiply two 16.16 fixed point values.  The 64 bit product carries 32
 * fractional bits; shifting right by 16 restores 16.16, and the clamp
 * helper WARNs if the result overflows 32 bits.
 */
static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
					     uint_fixed_16_16_t mul)
{
	uint64_t intermediate_val;

	intermediate_val = (uint64_t) val.val * mul.val;
	intermediate_val = intermediate_val >> 16;
	return clamp_u64_to_fixed16(intermediate_val);
}
204
/*
 * Divide two integers and return the quotient in 16.16 fixed point,
 * rounded up in the last fractional bit.
 */
static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
{
	uint64_t interm_val;

	interm_val = (uint64_t)val << 16;
	interm_val = DIV_ROUND_UP_ULL(interm_val, d);
	return clamp_u64_to_fixed16(interm_val);
}
213
/*
 * Divide an integer by a 16.16 fixed point value, returning the integer
 * quotient rounded up.  WARNs if the result does not fit in 32 bits.
 */
static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
						uint_fixed_16_16_t d)
{
	uint64_t interm_val;

	interm_val = (uint64_t)val << 16;
	interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
	WARN_ON(interm_val > U32_MAX);
	return (uint32_t) interm_val;
}
224
/*
 * Multiply an integer by a 16.16 fixed point value, returning the result
 * in 16.16 fixed point (WARNs on 32 bit overflow via the clamp helper).
 */
static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
						 uint_fixed_16_16_t mul)
{
	uint64_t intermediate_val;

	intermediate_val = (uint64_t) val * mul.val;
	return clamp_u64_to_fixed16(intermediate_val);
}
233
/* Add two 16.16 fixed point values (WARNs on 32 bit overflow). */
static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
					     uint_fixed_16_16_t add2)
{
	uint64_t interm_sum;

	interm_sum = (uint64_t) add1.val + add2.val;
	return clamp_u64_to_fixed16(interm_sum);
}
242
/*
 * Add an integer to a 16.16 fixed point value.  @add2 is first converted
 * to fixed point (WARNing if it exceeds 16 bits), then summed with 32 bit
 * overflow detection.
 */
static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
						 uint32_t add2)
{
	uint64_t interm_sum;
	uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);

	interm_sum = (uint64_t) add1.val + interm_add2.val;
	return clamp_u64_to_fixed16(interm_sum);
}
252
/*
 * Hotplug detect (HPD) pin assignments.  HPD_TV aliases HPD_NONE, so the
 * iterator below deliberately starts at HPD_NONE + 1.
 */
enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,	/* shares its pin value with HPD_NONE */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_NUM_PINS
};

/* Iterate over all real hotplug pins, skipping HPD_NONE/HPD_TV. */
#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)

/* Default value for i915_hotplug.hpd_storm_threshold (IRQ storm limit). */
#define HPD_STORM_DEFAULT_THRESHOLD 5
271
/* Hotplug detection state, embedded in struct drm_i915_private. */
struct i915_hotplug {
	struct work_struct hotplug_work;

	/* Per-pin IRQ bookkeeping used for storm detection/mitigation. */
	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;		/* pins with a pending hotplug event */
	struct delayed_work reenable_work;	/* re-enables disabled pins */

	/* Digital (long/short pulse) port interrupt handling. */
	struct intel_digital_port *irq_port[I915_MAX_PORTS];
	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	unsigned int hpd_storm_threshold;

	/*
	 * Dedicated workqueue for the digital port work above, separate
	 * from the system queues.  NOTE(review): presumably to avoid
	 * blocking behind unrelated work during DP MST handling — verify
	 * against the users of dp_wq.
	 */
	struct workqueue_struct *dp_wq;
};
306
/* Mask of all GEM access domains that live on the GPU side (i.e. not
 * I915_GEM_DOMAIN_CPU / _GTT). */
#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)
313
314struct drm_i915_private;
315struct i915_mm_struct;
316struct i915_mmu_object;
317
/* Per-open-file driver state, hung off struct drm_file. */
struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;	/* protects request_list */
		struct list_head request_list;
/* Time window used by the throttle ioctl to rate-limit this client. */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;
	struct idr context_idr;		/* this client's GEM contexts */

	struct intel_rps_client {
		atomic_t boosts;	/* RPS waitboosts requested */
	} rps_client;

	unsigned int bsd_engine;	/* preferred BSD ring for this client */

	/*
	 * Once a client accumulates this many context bans it is itself
	 * banned from creating further work.
	 */
#define I915_MAX_CLIENT_CONTEXT_BANS 3
	atomic_t context_bans;
};
349
350
351
352
353
354
355
356
357
358
359
360#define DRIVER_MAJOR 1
361#define DRIVER_MINOR 6
362#define DRIVER_PATCHLEVEL 0
363
364struct intel_overlay;
365struct intel_overlay_error_state;
366
/* SDVO output routing information, parsed from the VBT. */
struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};
375
376struct intel_connector;
377struct intel_encoder;
378struct intel_atomic_state;
379struct intel_crtc_state;
380struct intel_initial_plane_config;
381struct intel_crtc;
382struct intel_limit;
383struct dpll;
384struct intel_cdclk_state;
385
/*
 * Table of display function pointers; the entries are filled in with
 * platform-specific implementations at driver initialisation.
 */
struct drm_i915_display_funcs {
	void (*get_cdclk)(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_state *cdclk_state);
	void (*set_cdclk)(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_state *cdclk_state);
	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
			     enum i9xx_plane_id i9xx_plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
	int (*compute_intermediate_wm)(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate);
	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc_state *cstate);
	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc_state *cstate);
	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc_state *cstate);
	int (*compute_global_watermarks)(struct drm_atomic_state *state);
	void (*update_wm)(struct intel_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);

	/* Read out the current hw state of @crtc into the crtc_state and
	 * return whether the crtc is active. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
			    struct drm_atomic_state *old_state);
	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
			     struct drm_atomic_state *old_state);
	void (*update_crtcs)(struct drm_atomic_state *state);
	void (*audio_codec_enable)(struct intel_encoder *encoder,
				   const struct intel_crtc_state *crtc_state,
				   const struct drm_connector_state *conn_state);
	void (*audio_codec_disable)(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state);
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);

	/* Color management hooks. */
	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
	void (*load_luts)(struct drm_crtc_state *crtc_state);
};
438
/* Pack/unpack the CSR/DMC firmware version (major in the high 16 bits). */
#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)

/* State of the CSR/DMC firmware (display microcontroller). */
struct intel_csr {
	struct work_struct work;	/* async firmware load */
	const char *fw_path;
	uint32_t *dmc_payload;
	uint32_t dmc_fw_size;
	uint32_t version;
	uint32_t mmio_count;
	i915_reg_t mmioaddr[8];
	uint32_t mmiodata[8];
	uint32_t dc_state;
	uint32_t allowed_dc_mask;
};
455
456struct intel_display_error_state;
457
/*
 * Snapshot of the GPU state captured at the time of an error (hang/reset),
 * later exposed through debugfs/sysfs for bug reports.  Reference counted;
 * freed when the last reference is dropped.
 */
struct i915_gpu_state {
	struct kref ref;
	ktime_t time;		/* wall time of capture */
	ktime_t boottime;
	ktime_t uptime;

	struct drm_i915_private *i915;

	char error_msg[128];
	bool simulated;
	bool awake;
	bool wakelock;
	bool suspended;
	int iommu;
	u32 reset_count;
	u32 suspend_count;
	struct intel_device_info device_info;
	struct intel_driver_caps driver_caps;
	struct i915_params params;

	/* GuC/HuC firmware state at the time of the error. */
	struct i915_error_uc {
		struct intel_uc_fw guc_fw;
		struct intel_uc_fw huc_fw;
		struct drm_i915_error_object *guc_log;
	} uc;

	/* Generic register snapshot. */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4], ngtier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error;		/* gen6+ */
	u32 err_int;		/* gen7 */
	u32 fault_data0;
	u32 fault_data1;
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;

	u32 nfence;
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;

	/* Per-engine snapshot. */
	struct drm_i915_error_engine {
		int engine_id;
		/* Software tracked state */
		bool idle;
		bool waiting;
		int num_waiters;
		unsigned long hangcheck_timestamp;
		bool hangcheck_stalled;
		enum intel_engine_hangcheck_action hangcheck_action;
		struct i915_address_space *vm;
		int num_requests;
		u32 reset_count;

		/* position of active request inside the ring */
		u32 rq_head, rq_post, rq_tail;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 last_seqno;

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 mode;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi;	/* sleep state */
		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
		struct intel_instdone instdone;

		/* Context that was executing when the hang occurred. */
		struct drm_i915_error_context {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 handle;
			u32 hw_id;
			int priority;
			int ban_score;
			int active;
			int guilty;
			bool bannable;
		} context;

		/* Copies of interesting buffer objects. */
		struct drm_i915_error_object {
			u64 gtt_offset;
			u64 gtt_size;
			int page_count;
			int unused;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_object **user_bo;
		long user_bo_count;

		struct drm_i915_error_object *wa_ctx;
		struct drm_i915_error_object *default_state;

		/* Outstanding requests at capture time. */
		struct drm_i915_error_request {
			long jiffies;
			pid_t pid;
			u32 context;
			int priority;
			int ban_score;
			u32 seqno;
			u32 head;
			u32 tail;
		} *requests, execlist[EXECLIST_MAX_PORTS];
		unsigned int num_ports;

		/* Processes waiting on this engine's seqno advance. */
		struct drm_i915_error_waiter {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 seqno;
		} *waiters;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;
	} engine[I915_NUM_ENGINES];

	/* Summaries of the bound/pinned buffer objects. */
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_ENGINES], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 engine:4;
		u32 cache_level:3;
	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
	struct i915_address_space *active_vm[I915_NUM_ENGINES];
};
621
/* Cacheability of a GEM object with respect to the CPU/GPU caches. */
enum i915_cache_level {
	I915_CACHE_NONE = 0,	/* uncached */
	I915_CACHE_LLC,		/* snooped via the last-level cache */
	I915_CACHE_L3_LLC,	/* L3 + LLC (gen7) */
	/* Write-through; used e.g. for scanout buffers on platforms that
	 * support it. */
	I915_CACHE_WT,
};

/* drm_mm color value for nodes that eviction must never pick. */
#define I915_COLOR_UNEVICTABLE (-1)

/* Origin of a frontbuffer access, for frontbuffer tracking (FBC/PSR). */
enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};
641
/*
 * Framebuffer compression state.  Only one pipe can have FBC at a time;
 * all fields are protected by @lock unless noted otherwise.
 */
struct intel_fbc {
	struct mutex lock;
	unsigned threshold;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	unsigned int visible_pipes_mask;
	struct intel_crtc *crtc;	/* crtc currently using FBC */

	struct drm_mm_node compressed_fb;	/* stolen-mem CFB */
	struct drm_mm_node *compressed_llb;	/* pre-gen5 line buffer */

	bool false_color;

	bool enabled;
	bool active;

	bool underrun_detected;
	struct work_struct underrun_work;

	/*
	 * Cache of the crtc/plane/fb state relevant to FBC, captured at
	 * atomic check time so activation decisions do not have to chase
	 * live pointers.
	 */
	struct intel_fbc_state_cache {
		struct i915_vma *vma;
		unsigned long flags;

		struct {
			unsigned int mode_flags;
			uint32_t hsw_bdw_pixel_rate;
		} crtc;

		struct {
			unsigned int rotation;
			int src_w;
			int src_h;
			bool visible;
			/*
			 * Plane position within the fb after clipping;
			 * adjusted_y is what the hardware scans out from.
			 */
			int adjusted_x;
			int adjusted_y;

			int y;
		} plane;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
		} fb;
	} state_cache;

	/*
	 * Snapshot of the parameters actually programmed into the FBC
	 * registers when FBC was last activated, derived from state_cache.
	 */
	struct intel_fbc_reg_params {
		struct i915_vma *vma;
		unsigned long flags;

		struct {
			enum pipe pipe;
			enum i9xx_plane_id i9xx_plane;
			unsigned int fence_y_offset;
		} crtc;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
		} fb;

		int cfb_size;
		unsigned int gen9_wa_cfb_stride;
	} params;

	/* Deferred activation, scheduled for a future vblank. */
	struct intel_fbc_work {
		bool scheduled;
		u64 scheduled_vblank;
		struct work_struct work;
	} work;

	const char *no_fbc_reason;	/* for debugfs: why FBC is off */
};
734
735
736
737
738
739
/* Dynamic Refresh Rate Switching (DRRS) for eDP panels. */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR,	/* number of refresh rate types */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct mutex mutex;		/* protects all fields below */
	struct delayed_work work;	/* downclock after idle */
	struct intel_dp *dp;		/* DRRS-capable eDP, if any */
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};
761
/* Panel Self Refresh state; hooks are filled per-platform at init. */
struct i915_psr {
	struct mutex lock;
	bool sink_support;		/* panel advertises PSR */
	struct intel_dp *enabled;	/* non-NULL while PSR is enabled */
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool psr2_support;
	bool aux_frame_sync;
	bool link_standby;
	bool y_cord_support;
	bool colorimetry_support;
	bool alpm;

	void (*enable_source)(struct intel_dp *,
			      const struct intel_crtc_state *);
	void (*disable_source)(struct intel_dp *,
			       const struct intel_crtc_state *);
	void (*enable_sink)(struct intel_dp *);
	void (*activate)(struct intel_dp *);
	void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
};
784
/* Platform Controller Hub (south display) generations. */
enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint/Pantherpoint PCH */
	PCH_LPT,	/* Lynxpoint/Wildcatpoint PCH */
	PCH_SPT,	/* Sunrisepoint PCH */
	PCH_KBP,	/* Kaby Lake PCH */
	PCH_CNP,	/* Cannon Lake PCH */
	PCH_ICP,	/* Ice Lake PCH */
	PCH_NOP,	/* PCH without south display */
};

/* Sideband interface destinations. */
enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

/* Per-device quirk flags, set from a quirk table at init. */
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
807
808struct intel_fbdev;
809struct intel_fbc_work;
810
/* One GMBUS (hardware i2c) pin pair, with bit-banging fallback. */
struct intel_gmbus {
	struct i2c_adapter adapter;
/* When set in force_bit, retry the transfer and re-evaluate bit-banging. */
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;		/* non-zero: use bit-banging instead of GMBUS */
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};
820
/* Registers saved across system suspend/resume. */
struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};
833
/*
 * Valleyview/Cherryview register state saved around S0ix, since these
 * registers lose their contents while the device is in that state.
 */
struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};
895
/* Snapshot of the EI (evaluation interval) counters for manual RPS. */
struct intel_rps_ei {
	ktime_t ktime;
	u32 render_c0;
	u32 media_c0;
};

/* Render P-state (GPU frequency scaling) state. */
struct intel_rps {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* PM interrupt bits that should never be masked in IMR */
	u32 pm_intrmsk_mbz;

	/*
	 * Frequencies are stored in potentially platform dependent
	 * multiples (i.e. *_freq needs to be multiplied by a platform
	 * factor to get Hz).  Soft limits are those honoured by the
	 * driver's dynamic reclocking; hard limits (min_freq/max_freq)
	 * are those imposed by the hardware.
	 */
	u8 cur_freq;		/* Current frequency (cached) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn, minimum frequency */
	u8 boost_freq;		/* Frequency used for waitboosting */
	u8 idle_freq;		/* Frequency used when the GPU is idle */
	u8 efficient_freq;	/* AKA RPe, most power-efficient frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency */
	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */

	u8 up_threshold;	/* Current %busy required to uplock */
	u8 down_threshold;	/* Current %busy required to downclock */

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	bool enabled;
	atomic_t num_waiters;
	atomic_t boosts;

	/* manual rps frequency control based on EI counters */
	struct intel_rps_ei ei;
};
949
/* RC6 (render standby) state and residency accounting. */
struct intel_rc6 {
	bool enabled;
	u64 prev_hw_residency[4];
	u64 cur_residency[4];
};

/* LLC package-state hint (small core platforms). */
struct intel_llc_pstate {
	bool enabled;
};

/* Container for the gen6+ GT power management state. */
struct intel_gen6_power_mgmt {
	struct intel_rps rps;
	struct intel_rc6 rc6;
	struct intel_llc_pstate llc_pstate;
};
965
966
967extern spinlock_t mchdev_lock;
968
/* Ironlake-era GPU power/thermal management state (IPS). */
struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};
987
988struct drm_i915_private;
989struct i915_power_well;
990
/* Operations on a display power well; implementations are per-platform. */
struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state,
	 * for example enable/disable it based on the current refcount.
	 * Called right after the hw state is read out from the well.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well).
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state of the well. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};
1017
1018
/* A display power well; the layout of the union is platform specific. */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	u64 domains;		/* mask of power domains backed by this well */
	/* unique identifier for this power well */
	enum i915_power_well_id id;
	/*
	 * Arbitrary data associated with this power well.  Platform and
	 * power-well specific.
	 */
	union {
		struct {
			enum dpio_phy phy;
		} bxt;
		struct {
			/* Mask of pipes whose IRQ logic is backed by the
			 * power well. */
			u8 irq_pipe_mask;
			/* The pw is backing the VGA functionality */
			bool has_vga:1;
			bool has_fuses:1;
		} hsw;
	};
	const struct i915_power_well_ops *ops;
};
1047
/* Refcounted tracking of all display power domains/wells. */
struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;

	struct mutex lock;	/* protects the use counts and well state */
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};
1061
/* L3 cache parity error handling: remap info per slice. */
#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;	/* deferred parity error handling */
	int which_slice;		/* bitmask of slices with errors */
};
1068
/* GEM memory-management state, embedded in drm_i915_private. */
struct i915_gem_mm {
	/* Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/* Protects the usage of the GTT stolen memory allocator */
	struct mutex stolen_lock;

	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
	spinlock_t obj_lock;

	/* List of objects currently involved in rendering.  Kept in GTT
	 * bind order. */
	struct list_head bound_list;

	/*
	 * List of objects which are not bound to the GTT (thus unpinned and
	 * cannot be rendering) but still have backing pages attached.
	 */
	struct list_head unbound_list;

	/* List of objects that have an active GGTT mmapping which must be
	 * revoked on runtime suspend. */
	struct list_head userfault_list;

	/*
	 * Objects pending destruction; freed from a worker so the actual
	 * release can sleep.
	 */
	struct llist_head free_list;
	struct work_struct free_work;
	spinlock_t free_lock;
	/*
	 * Count of objects pending destructions. Used to skip needlessly
	 * waiting on an RCU barrier if no objects are waiting to be freed.
	 */
	atomic_t free_count;

	/*
	 * Small stash of WC pages, reused for writing page tables.
	 */
	struct pagevec wc_stash;

	/*
	 * tmpfs instance used for shmem backed objects
	 */
	struct vfsmount *gemfs;

	/* PPGTT used for aliasing the GGTT (gen6 semantics). */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	/* List of all objects with fence registers allocated (LRU). */
	struct list_head fence_list;

	/*
	 * Workqueue to fault in userptr pages, flushed by the execbuf
	 * when required but otherwise left to userspace to try again
	 * on EAGAIN.
	 */
	struct workqueue_struct *userptr_wq;

	u64 unordered_timeline;

	/* the indicator for dispatch video commands on two BSD rings */
	atomic_t bsd_engine_dispatch_index;

	/* Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/* Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	u64 object_memory;
	u32 object_count;
};
1148
/* Growable text buffer used when formatting a captured error state. */
struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned bytes;		/* bytes written so far */
	unsigned size;		/* allocated capacity */
	int err;		/* sticky error (e.g. -ENOMEM) */
	u8 *buf;
	loff_t start;
	loff_t pos;
};

/* Timeout (ms) when waiting for all engines to become idle. */
#define I915_IDLE_ENGINES_TIMEOUT (200) /* msec */

#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */

#define I915_ENGINE_DEAD_TIMEOUT  (4 * HZ)  /* Seqno, head and subunits dead */
#define I915_SEQNO_DEAD_TIMEOUT   (12 * HZ) /* Seqno dead with active head */
1166
/* GPU hang detection and reset bookkeeping. */
struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)

	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct i915_gpu_state *first_error;

	atomic_t pending_fb_pin;

	unsigned long missed_irq_rings;

	/*
	 * State variable controlling the reset flow and count.
	 *
	 * The low bits of reset_count form an even/odd epoch: the counter
	 * is incremented once when a reset is requested and once more when
	 * it completes, so an odd value means a reset is in progress.
	 * Waiters sample the counter before sleeping and bail out if it
	 * changed (a reset happened) while they slept.
	 */
	unsigned long reset_count;

	/*
	 * Bit flags for the reset state machine:
	 *
	 * #I915_RESET_BACKOFF - serialises multiple reset attempts and
	 * tells new work submission to back off while a reset is pending.
	 *
	 * #I915_RESET_HANDOFF - hands the actual reset over to the waiter
	 * that detected the hang.
	 *
	 * #I915_RESET_MODESET - set when the display is also affected and
	 * modeset locking is required around the reset (gen3/4 scanout
	 * reads the GTT).
	 *
	 * #I915_RESET_ENGINE + engine-id - set while an individual engine
	 * reset is attempted, before escalating to a full GPU reset.
	 *
	 * #I915_WEDGED - the GPU is declared unusable; all outstanding and
	 * future requests are completed with an error.
	 */
	unsigned long flags;
#define I915_RESET_BACKOFF	0
#define I915_RESET_HANDOFF	1
#define I915_RESET_MODESET	2
#define I915_WEDGED		(BITS_PER_LONG - 1)
#define I915_RESET_ENGINE	(I915_WEDGED - I915_NUM_ENGINES)

	/* Number of times an engine has been reset */
	u32 reset_engine_count[I915_NUM_ENGINES];

	/*
	 * Waitqueue to signal when a hang is detected. Used to for waiters
	 * to release the struct_mutex for the reset to procede.
	 */
	wait_queue_head_t wait_queue;

	/*
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For missed irq/seqno simulation. */
	unsigned long test_irq_rings;
};
1262
/* State of display restore across lid close/open and suspend. */
enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

/* VBT encodings for the alternate AUX channel of a DDI port. */
#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30
#define DP_AUX_F 0x60

/* VBT encodings for the alternate DDC pin of a DDI port. */
#define DDC_PIN_B  0x05
#define DDC_PIN_C  0x04
#define DDC_PIN_D  0x06
1278
/* Per-DDI-port capabilities parsed from the VBT. */
struct ddi_vbt_port_info {
	int max_tmds_clock;

	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;
	uint8_t supports_edp:1;

	uint8_t alternate_aux_channel;	/* DP_AUX_* or 0 */
	uint8_t alternate_ddc_pin;	/* DDC_PIN_* or 0 */

	uint8_t dp_boost_level;
	uint8_t hdmi_boost_level;
	int dp_max_link_rate;		/* 0 for not limited by VBT */
};

/* VBT encoding of the PSR "lines to wait" panel parameter. */
enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};
1309
/* Decoded contents of the Video BIOS Table (VBT). */
struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int panel_type:4;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	enum drrs_support_type drrs_type;

	/* eDP link parameters and power sequencing from the VBT. */
	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;
		bool low_vswing;
		bool initialized;
		bool support;
		int bpp;
		struct edp_power_seq pps;
	} edp;

	/* PSR panel parameters from the VBT. */
	struct {
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time;
		int tp2_tp3_wakeup_time;
	} psr;

	/* Backlight control parameters. */
	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
		u8 controller;		/* brightness controller number */
		enum intel_backlight_type type;
	} backlight;

	/* MIPI DSI panel info and init sequences. */
	struct {
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u16 bl_ports;
		u16 cabc_ports;
		u8 seq_version;
		u32 size;
		u8 *data;
		const u8 *sequence[MIPI_SEQ_MAX];
		u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	struct child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
	struct sdvo_device_mapping sdvo_mappings[2];
};
1380
/* Display data buffer split between planes and sprites (ILK-BDW). */
enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

/* Watermark values for one level (primary/sprite/cursor/FBC). */
struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

/* Final register values to be written for ILK-style watermarks. */
struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

/* Per-pipe plane watermarks on g4x/vlv/chv. */
struct g4x_pipe_wm {
	uint16_t plane[I915_MAX_PLANES];
	uint16_t fbc;
};

/* Single-pipe "self refresh" watermarks on g4x/vlv/chv. */
struct g4x_sr_wm {
	uint16_t plane;
	uintint16_t cursor;
	uint16_t fbc;
};

struct vlv_wm_ddl_values {
	uint8_t plane[I915_MAX_PLANES];
};

struct vlv_wm_values {
	struct g4x_pipe_wm pipe[3];
	struct g4x_sr_wm sr;
	struct vlv_wm_ddl_values ddl[3];
	uint8_t level;
	bool cxsr;
};

struct g4x_wm_values {
	struct g4x_pipe_wm pipe[2];
	struct g4x_sr_wm sr;
	struct g4x_sr_wm hpll;
	bool cxsr;
	bool hpll_en;
	bool fbc_en;
};
1434
/* A contiguous range [start, end) of display buffer (DDB) blocks. */
struct skl_ddb_entry {
	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
};

/* Number of DDB blocks spanned by @entry. */
static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

/* Two entries are equal iff they span exactly the same block range. */
static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	return e1->start == e2->start && e1->end == e2->end;
}
1452
/* DDB block allocation for every plane of every pipe (SKL+). */
struct skl_ddb_allocation {
	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
};

struct skl_wm_values {
	unsigned dirty_pipes;	/* bitmask of pipes whose ddb changed */
	struct skl_ddb_allocation ddb;
};

/* A single SKL watermark level: enable + blocks + lines. */
struct skl_wm_level {
	bool plane_en;
	uint16_t plane_res_b;
	uint8_t plane_res_l;
};

/* Stores plane-specific parameters for the SKL watermark calculations. */
struct skl_wm_params {
	bool x_tiled, y_tiled;
	bool rc_surface;
	uint32_t width;
	uint8_t cpp;
	uint32_t plane_pixel_rate;
	uint32_t y_min_scanlines;
	uint32_t plane_bytes_per_line;
	uint_fixed_16_16_t plane_blocks_per_line;
	uint_fixed_16_16_t y_tile_minimum;
	uint32_t linetime_us;
	uint32_t dbuf_block_size;
};
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
/* Runtime power management state for the device. */
struct i915_runtime_pm {
	atomic_t wakeref_count;	/* outstanding runtime-pm references */
	bool suspended;
	bool irqs_enabled;
};
1512
/* Selectable sources for the pipe CRC hardware (debug/testing). */
enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

/* One captured CRC sample (up to 5 CRC result fields per frame). */
struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
/* Ring buffer of captured CRCs for one pipe, read via debugfs. */
struct intel_pipe_crc {
	spinlock_t lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
	int skipped;
};
1543
/* Frontbuffer busy/flip tracking, consumed by FBC, PSR and DRRS. */
struct i915_frontbuffer_tracking {
	spinlock_t lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing du to gpu activity or
	 * scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};
1554
/* One register workaround: write @value to @addr under @mask. */
struct i915_wa_reg {
	i915_reg_t addr;
	u32 value;
	/* bitmask representing WA bits */
	u32 mask;
};

#define I915_MAX_WA_REGS 16

/* Collected register workarounds applied on (re)init. */
struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
	u32 hw_whitelist_count[I915_NUM_ENGINES];
};

/* State of the (mediated) virtual GPU when running as a VGT guest. */
struct i915_virtual_gpu {
	bool active;
	u32 caps;
};
1574
1575
/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

/* An OA unit report format and its size in bytes. */
struct i915_oa_format {
	u32 format;
	int size;
};

/* A single OA configuration register write. */
struct i915_oa_reg {
	i915_reg_t addr;
	u32 value;
};

/*
 * A user- or driver-defined OA metrics configuration: the register
 * programming for the mux, boolean counters and flex EU counters, plus
 * the sysfs plumbing that exposes it.  Reference counted since streams
 * may outlive the config's sysfs entry.
 */
struct i915_oa_config {
	char uuid[UUID_STRING_LEN + 1];
	int id;

	const struct i915_oa_reg *mux_regs;
	u32 mux_regs_len;
	const struct i915_oa_reg *b_counter_regs;
	u32 b_counter_regs_len;
	const struct i915_oa_reg *flex_regs;
	u32 flex_regs_len;

	struct attribute_group sysfs_metric;
	struct attribute *attrs[2];
	struct device_attribute sysfs_metric_id;

	atomic_t ref_count;
};
1609
1610struct i915_perf_stream;
1611
1612
1613
1614
/* Operations implementing an i915 perf stream. */
struct i915_perf_stream_ops {
	/*
	 * enable: Enables the collection of HW samples, either in response
	 * to I915_PERF_IOCTL_ENABLE or implicitly called when stream is
	 * opened without I915_PERF_FLAG_DISABLED.
	 */
	void (*enable)(struct i915_perf_stream *stream);

	/*
	 * disable: Disables the collection of HW samples, either in
	 * response to I915_PERF_IOCTL_DISABLE or implicitly called before
	 * the stream is destroyed.
	 */
	void (*disable)(struct i915_perf_stream *stream);

	/*
	 * poll_wait: Call poll_wait, passing a wait queue that will be
	 * woken once there is something ready to read() for the stream.
	 */
	void (*poll_wait)(struct i915_perf_stream *stream,
			  struct file *file,
			  poll_table *wait);

	/*
	 * wait_unlocked: For handling a blocking read, wait until there is
	 * something to ready to read() for the stream.  E.g. wait on the
	 * same wait queue that would be passed to poll_wait().
	 */
	int (*wait_unlocked)(struct i915_perf_stream *stream);

	/*
	 * read: Copy buffered metrics as records to userspace.
	 *
	 * Only write complete records; returning -ENOSPC if there isn't
	 * room for a complete record.  Only advance *offset for fully
	 * copied records.
	 *
	 * Return any error condition that results in a short read such as
	 * -ENOSPC or -EFAULT, even though these may be squashed before
	 * returning to userspace.
	 */
	int (*read)(struct i915_perf_stream *stream,
		    char __user *buf,
		    size_t count,
		    size_t *offset);

	/*
	 * destroy: Cleanup any stream specific resources.
	 *
	 * The stream will always be disabled before this is called.
	 */
	void (*destroy)(struct i915_perf_stream *stream);
};
1675
1676
1677
1678
/**
 * struct i915_perf_stream - state for a single open i915 perf stream
 */
struct i915_perf_stream {
	/*
	 * @dev_priv: the owning i915 device.
	 */
	struct drm_i915_private *dev_priv;

	/*
	 * @link: list node — presumably links into dev_priv->perf.streams;
	 * confirm at the list_add site.
	 */
	struct list_head link;

	/*
	 * @sample_flags: flags selecting what is included in each sample
	 * record read back from the stream.
	 */
	u32 sample_flags;

	/*
	 * @sample_size: size of a single sample record, derived from
	 * @sample_flags — presumably in bytes.
	 */
	int sample_size;

	/*
	 * @ctx: context to restrict sampling to; NULL presumably means
	 * system-wide sampling — TODO confirm.
	 */
	struct i915_gem_context *ctx;

	/*
	 * @enabled: whether capture is currently enabled for this stream
	 * (toggled via ops->enable/ops->disable).
	 */
	bool enabled;

	/*
	 * @ops: the type-specific operations for this stream, see
	 * &struct i915_perf_stream_ops.
	 */
	const struct i915_perf_stream_ops *ops;

	/*
	 * @oa_config: the OA metrics configuration in use, if any.
	 */
	struct i915_oa_config *oa_config;
};
1728
1729
1730
1731
1732struct i915_oa_ops {
1733
1734
1735
1736
1737 bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
1738 u32 addr);
1739
1740
1741
1742
1743
1744 bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);
1745
1746
1747
1748
1749
1750 bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767 void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
1768
1769
1770
1771
1772
1773
1774
1775 int (*enable_metric_set)(struct drm_i915_private *dev_priv,
1776 const struct i915_oa_config *oa_config);
1777
1778
1779
1780
1781
1782 void (*disable_metric_set)(struct drm_i915_private *dev_priv);
1783
1784
1785
1786
1787 void (*oa_enable)(struct drm_i915_private *dev_priv);
1788
1789
1790
1791
1792 void (*oa_disable)(struct drm_i915_private *dev_priv);
1793
1794
1795
1796
1797
1798 int (*read)(struct i915_perf_stream *stream,
1799 char __user *buf,
1800 size_t count,
1801 size_t *offset);
1802
1803
1804
1805
1806
1807
1808
1809
1810 u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
1811};
1812
/* A complete cdclk (core display clock) state. */
struct intel_cdclk_state {
	/* cdclk, vco, reference and bypass frequencies — units per the cdclk
	 * code (presumably kHz; confirm in intel_cdclk.c) */
	unsigned int cdclk, vco, ref, bypass;
	/* minimum voltage level required for this cdclk */
	u8 voltage_level;
};
1817
/* Top-level per-device private state for the i915 driver. */
struct drm_i915_private {
	struct drm_device drm;	/* embedded base DRM device; see to_i915() */

	/* slab caches for the driver's high-churn GEM objects */
	struct kmem_cache *objects;
	struct kmem_cache *vmas;
	struct kmem_cache *luts;
	struct kmem_cache *requests;
	struct kmem_cache *dependencies;
	struct kmem_cache *priorities;

	const struct intel_device_info info;	/* static device info; see INTEL_INFO() */
	struct intel_driver_caps caps;

	/*
	 * Data Stolen Memory: graphics memory reserved by the BIOS/firmware,
	 * plus the portion of it reserved from driver use.
	 */
	struct resource dsm;

	/* reserved sub-range of @dsm the driver must not touch */
	struct resource dsm_reserved;

	/* size of stolen memory actually usable by the driver */
	resource_size_t stolen_usable_size;

	void __iomem *regs;	/* MMIO register mapping */

	struct intel_uncore uncore;

	struct i915_virtual_gpu vgpu;

	struct intel_gvt *gvt;

	struct intel_huc huc;
	struct intel_guc guc;

	struct intel_csr csr;

	struct intel_gmbus gmbus[GMBUS_NUM_PINS];

	/* serializes GMBUS accesses across the pins above */
	struct mutex gmbus_mutex;

	/* per-platform MMIO base offsets for various display blocks */
	uint32_t gpio_mmio_base;

	uint32_t mipi_mmio_base;

	uint32_t psr_mmio_base;

	uint32_t pps_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_engine_cs *engine[I915_NUM_ENGINES];

	/* kernel-owned default context */
	struct i915_gem_context *kernel_context;

	/* context used for engine preemption — TODO confirm usage */
	struct i915_gem_context *preempt_context;
	/* engine lookup by (class, instance) pair */
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];

	struct drm_dma_handle *status_page_dmah;
	struct resource mch_res;

	/* protects the irq masks below */
	spinlock_t irq_lock;

	bool display_irqs_enabled;

	struct pm_qos_request pm_qos;

	/* sideband access lock */
	struct mutex sb_lock;

	/* cached interrupt mask state; gen-dependent which member applies */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_imr;
	u32 pm_ier;
	u32 pm_rps_events;
	u32 pm_guc_events;
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct i915_hotplug hotplug;
	struct intel_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* legacy overlay; NULL when the hardware has none */
	struct intel_overlay *overlay;

	/* protects panel backlight state */
	struct mutex backlight_lock;

	bool no_aux_handshake;

	/* protects panel power sequencer state */
	struct mutex pps_mutex;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
	int num_fence_regs;	/* number of usable fence registers */

	/* assorted platform clock frequencies */
	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_preferred_vco_freq;
	unsigned int max_cdclk_freq;

	unsigned int max_dotclk_freq;
	unsigned int rawclk_freq;
	unsigned int hpll_freq;
	unsigned int fdi_pll_freq;
	unsigned int czclk_freq;

	struct {
		/* logical: software-requested cdclk state */
		struct intel_cdclk_state logical;
		/* actual: cdclk state to be programmed */
		struct intel_cdclk_state actual;
		/* hw: cdclk state currently read back from hardware */
		struct intel_cdclk_state hw;
	} cdclk;

	/* ordered driver workqueue; see also modeset_wq */
	struct workqueue_struct *wq;

	/* workqueue dedicated to modeset work */
	struct workqueue_struct *modeset_wq;

	/* display function vtable, filled in per platform */
	struct drm_i915_display_funcs display;

	/* PCH (platform controller hub) type and id; see HAS_PCH_*() */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;
	struct drm_atomic_state *modeset_restore_state;
	struct drm_modeset_acquire_ctx reset_ctx;

	struct list_head vm_list;	/* list of address spaces */
	struct i915_ggtt ggtt;		/* global GTT */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	struct mutex mm_lock;

	struct intel_ppat ppat;

	/* crtc lookups indexed by pipe/plane */
	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	/* shared display PLL state */
	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	const struct intel_dpll_mgr *dpll_mgr;

	/* protects shared DPLL state above */
	struct mutex dpll_lock;

	unsigned int active_crtcs;	/* bitmask of currently active crtcs */

	/* per-pipe minimum cdclk / voltage requirements */
	int min_cdclk[I915_MAX_PIPES];

	u8 min_voltage_level[I915_MAX_PIPES];

	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	struct i915_workarounds workarounds;

	struct i915_frontbuffer_tracking fb_tracking;

	/* deferred freeing of atomic state */
	struct intel_atomic_helper {
		struct llist_head free_list;
		struct work_struct free_work;
	} atomic_helper;

	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* eDRAM capability register value; see HAS_EDRAM() */
	u32 edram_cap;

	/*
	 * Protects communication with the power control unit (PCU) mailbox
	 * — presumably; confirm against the sandybridge_pcode_* users.
	 */
	struct mutex pcu_lock;

	/* gen6+ GT power management (RPS/RC6) state */
	struct intel_gen6_power_mgmt gt_pm;

	/* Ironlake-era power management state */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

	/* legacy fbdev emulation */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	/* audio/display component interface to the snd_hda driver */
	struct i915_audio_component *audio_component;
	bool audio_component_registered;

	/* serializes audio/video interactions */
	struct mutex av_mutex;

	struct {
		struct list_head list;
		struct llist_head free_list;
		struct work_struct free_work;

		/* allocator for hardware context ids */
		struct ida hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21)
#define GEN11_MAX_CONTEXT_HW_ID (1<<11)
	} contexts;

	u32 fdi_rx_config;

	/* cached CHV PHY control register value */
	u32 chv_phy_control;

	u32 chv_dpll_md[I915_MAX_PIPES];
	u32 bxt_phy_grc;

	u32 suspend_count;
	bool power_domains_suspended;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state vlv_s0ix_state;

	/* SAGV (system agent voltage/frequency) state */
	enum {
		I915_SAGV_UNKNOWN = 0,
		I915_SAGV_DISABLED,
		I915_SAGV_ENABLED,
		I915_SAGV_NOT_CONTROLLED
	} sagv_status;

	struct {
		/*
		 * Raw watermark latency values (per level) for primary,
		 * sprite and cursor planes, and the gen9+ unified table.
		 * Units per the watermark code — TODO confirm.
		 */
		uint16_t pri_latency[5];

		uint16_t spr_latency[5];

		uint16_t cur_latency[5];

		uint16_t skl_latency[8];

		/* current hardware watermark state; which member applies is
		 * platform-dependent */
		union {
			struct ilk_wm_values hw;
			struct skl_wm_values skl_hw;
			struct vlv_wm_values vlv;
			struct g4x_wm_values g4x;
		};

		uint8_t max_level;

		/* protects watermark (re)computation */
		struct mutex wm_mutex;

		/* set when the BIOS-programmed watermarks can't be trusted */
		bool distrust_bios_wm;
	} wm;

	struct i915_runtime_pm runtime_pm;

	/* i915 perf (OA) interface state */
	struct {
		bool initialized;

		struct kobject *metrics_kobj;
		struct ctl_table_header *sysctl_header;

		/* protects adding/removing metric configs */
		struct mutex metrics_lock;

		/* id -> i915_oa_config lookup */
		struct idr metrics_idr;

		/* protects the stream state below */
		struct mutex lock;
		struct list_head streams;

		struct {
			/* at most one OA stream may be open at a time */
			struct i915_perf_stream *exclusive_stream;

			u32 specific_ctx_id;

			struct hrtimer poll_check_timer;
			wait_queue_head_t poll_wq;
			bool pollin;

			/* rate-limits warnings about spurious OA reports */
			struct ratelimit_state spurious_report_rs;

			bool periodic;
			int period_exponent;

			struct i915_oa_config test_config;

			/* the circular OA report buffer and its bookkeeping */
			struct {
				struct i915_vma *vma;
				u8 *vaddr;
				u32 last_ctx_id;
				int format;
				int format_size;

				/* protects the tail-pointer aging state below */
				spinlock_t ptr_lock;

				/* two aging tail pointers; reports before the
				 * aged tail are considered stable to read */
				struct {
					u32 offset;
				} tails[2];

				/* index into @tails of the aged entry */
				unsigned int aged_tail_idx;

				/* timestamp of the last tail-pointer update */
				u64 aging_timestamp;

				/* software read head into the OA buffer */
				u32 head;
			} oa_buffer;

			u32 gen7_latched_oastatus1;
			u32 ctx_oactxctrl_offset;
			u32 ctx_flexeu0_offset;

			/* marks valid reports in gen8+ report headers */
			u32 gen8_valid_ctx_bit;

			struct i915_oa_ops ops;
			const struct i915_oa_format *oa_formats;
		} oa;
	} perf;

	/* GT (GPU core) state abstracted from the engines themselves */
	struct {
		void (*resume)(struct drm_i915_private *);
		void (*cleanup_engine)(struct intel_engine_cs *engine);

		struct list_head timelines;
		struct i915_gem_timeline global_timeline;
		u32 active_requests;	/* count of in-flight requests */

		/* true while the GT is awake (holding a wakeref) */
		bool awake;

		/* bumped each time the GT transitions to awake — presumably
		 * used to detect busyness epochs; confirm at the users */
		unsigned int epoch;
#define I915_EPOCH_INVALID 0

		/* periodically retires completed requests */
		struct delayed_work retire_work;

		/* parks the GT after a period of idleness */
		struct delayed_work idle_work;

		ktime_t last_init_time;
	} gt;

	bool chv_phy_assert[2];

	bool ipc_enabled;

	/* per-pipe audio-capable encoder, for the audio component */
	struct intel_encoder *av_enc_map[I915_MAX_PIPES];

	/* LPE audio platform device bridge */
	struct {
		struct platform_device *platdev;
		int irq;
	} lpe_audio;

	struct i915_pmu pmu;

	/*
	 * NOTE: new members should normally be added to a substruct above
	 * rather than appended here at the tail.
	 */
};
2384
/* Upcast from the embedded &drm_device to its owning i915 device. */
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}
2389
/* Resolve a struct device (whose drvdata is the drm_device) to the i915
 * device that owns it. */
static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
	struct drm_device *drm = dev_get_drvdata(kdev);

	return to_i915(drm);
}
2394
/* Upcast from the embedded &intel_guc to its owning i915 device. */
static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
{
	return container_of(guc, struct drm_i915_private, guc);
}
2399
/* Upcast from the embedded &intel_huc to its owning i915 device. */
static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
{
	return container_of(huc, struct drm_i915_private, huc);
}
2404
2405
/* Iterate over all engine slots, skipping absent engines (NULL entries). */
#define for_each_engine(engine__, dev_priv__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_NUM_ENGINES; \
	     (id__)++) \
		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])

/* Iterate over the engines selected by mask__ (restricted to engines the
 * device actually has); tmp__ is scratch storage for the remaining mask. */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
	for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask;	\
	     tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; )
2416
/* User override for HDMI audio, presumably set via the force_audio
 * connector property — confirm at the property handler. */
enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* treat sink as DVI: no audio at all */
	HDMI_AUDIO_OFF,			/* force HDMI audio off */
	HDMI_AUDIO_AUTO,		/* decide from sink capabilities */
	HDMI_AUDIO_ON,			/* force HDMI audio on */
};
2423
/* Sentinel for "no GTT offset assigned". */
#define I915_GTT_OFFSET_NONE ((u32)-1)

/*
 * Frontbuffer tracking bit layout: each pipe owns a contiguous run of
 * INTEL_FRONTBUFFER_BITS_PER_PIPE bits, one per plane, with the last bit
 * of each pipe's run reserved for the legacy overlay. The BUILD_BUG_ONs
 * keep the whole scheme inside a 32-bit mask.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
	BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
})
/* the overlay uses the highest bit of the pipe's run */
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
/* mask covering every frontbuffer bit of the given pipe */
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
2445
2446
2447
2448
2449static __always_inline struct sgt_iter {
2450 struct scatterlist *sgp;
2451 union {
2452 unsigned long pfn;
2453 dma_addr_t dma;
2454 };
2455 unsigned int curr;
2456 unsigned int max;
2457} __sgt_iter(struct scatterlist *sgl, bool dma) {
2458 struct sgt_iter s = { .sgp = sgl };
2459
2460 if (s.sgp) {
2461 s.max = s.curr = s.sgp->offset;
2462 s.max += s.sgp->length;
2463 if (dma)
2464 s.dma = sg_dma_address(s.sgp);
2465 else
2466 s.pfn = page_to_pfn(sg_page(s.sgp));
2467 }
2468
2469 return s;
2470}
2471
2472static inline struct scatterlist *____sg_next(struct scatterlist *sg)
2473{
2474 ++sg;
2475 if (unlikely(sg_is_chain(sg)))
2476 sg = sg_chain_ptr(sg);
2477 return sg;
2478}
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2490{
2491#ifdef CONFIG_DEBUG_SG
2492 BUG_ON(sg->sg_magic != SG_MAGIC);
2493#endif
2494 return sg_is_last(sg) ? NULL : ____sg_next(sg);
2495}
2496
2497
2498
2499
2500
2501
2502
/*
 * for_each_sgt_dma - iterate over the DMA addresses of an sg_table in
 * PAGE_SIZE steps, assigning each address to __dmap. Note the loop
 * condition relies on a zero DMA address terminating the walk.
 */
#define for_each_sgt_dma(__dmap, __iter, __sgt) \
	for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
	     ((__dmap) = (__iter).dma + (__iter).curr); \
	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)

/*
 * for_each_sgt_page - iterate over the struct pages of an sg_table in
 * PAGE_SIZE steps, assigning each page to __pp; terminates on a zero pfn.
 */
#define for_each_sgt_page(__pp, __iter, __sgt) \
	for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
	     ((__pp) = (__iter).pfn == 0 ? NULL : \
	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
2521
2522static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
2523{
2524 unsigned int page_sizes;
2525
2526 page_sizes = 0;
2527 while (sg) {
2528 GEM_BUG_ON(sg->offset);
2529 GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
2530 page_sizes |= sg->length;
2531 sg = __sg_next(sg);
2532 }
2533
2534 return page_sizes;
2535}
2536
2537static inline unsigned int i915_sg_segment_size(void)
2538{
2539 unsigned int size = swiotlb_max_segment();
2540
2541 if (size == 0)
2542 return SCATTERLIST_MAX_SEGMENT;
2543
2544 size = rounddown(size, PAGE_SIZE);
2545
2546 if (size < PAGE_SIZE)
2547 size = PAGE_SIZE;
2548
2549 return size;
2550}
2551
/* Accessor for the device's static capability info; use via INTEL_INFO(). */
static inline const struct intel_device_info *
intel_info(const struct drm_i915_private *dev_priv)
{
	return &dev_priv->info;
}
2557
#define INTEL_INFO(dev_priv) intel_info((dev_priv))

#define INTEL_GEN(dev_priv)	((dev_priv)->info.gen)
#define INTEL_DEVID(dev_priv)	((dev_priv)->info.device_id)

#define REVID_FOREVER		0xff
#define INTEL_REVID(dev_priv)	((dev_priv)->drm.pdev->revision)

/* open-ended bound marker for INTEL_GEN_MASK()/IS_GEN() ranges */
#define GEN_FOREVER (0)

/*
 * Build a gen_mask covering generations [s, e]; bit (gen - 1) represents
 * generation gen. Both bounds must be compile-time constants (enforced by
 * the BUILD_BUG_ON_ZEROs), and GEN_FOREVER leaves the respective end open.
 */
#define INTEL_GEN_MASK(s, e) ( \
	BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
	BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
	GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
		(s) != GEN_FOREVER ? (s) - 1 : 0) \
)

/*
 * Returns true if the device's gen is in the inclusive range [s, e].
 * Use GEN_FOREVER for an unbounded start and/or end.
 */
#define IS_GEN(dev_priv, s, e) \
	(!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))

/*
 * Return true if the revision is in the range [since, until] inclusive.
 * Use 0 for an open-ended since, and REVID_FOREVER for an open-ended until.
 */
#define IS_REVID(p, since, until) \
	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
2590
/* Platform checks: one bit per enum intel_platform value in platform_mask. */
#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))

#define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
#define IS_I85X(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I85X)
#define IS_I865G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I865G)
#define IS_I915G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915G)
#define IS_I915GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915GM)
#define IS_I945G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945G)
#define IS_I945GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945GM)
#define IS_I965G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965G)
#define IS_I965GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965GM)
#define IS_G45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G45)
#define IS_GM45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv)	(IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW_G(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa001)
#define IS_PINEVIEW_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0xa011)
#define IS_PINEVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
#define IS_G33(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G33)
#define IS_IRONLAKE_M(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0046)
#define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
				 (dev_priv)->info.gt == 1)
#define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
#define IS_BROADWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROADWELL)
#define IS_SKYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
#define IS_BROXTON(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROXTON)
#define IS_KABYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_CANNONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_MOBILE(dev_priv)	((dev_priv)->info.is_mobile)
/*
 * Subplatform (ULT/ULX/GT-level) checks: derived from PCI device-id
 * patterns or the info.gt field. The magic device ids below come from
 * Intel's published id lists.
 */
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 ||	\
				 (INTEL_DEVID(dev_priv) & 0xf) == 0xb ||	\
				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
/* ULX machines are a subset of ULT machines */
#define IS_BDW_ULX(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
#define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 (dev_priv)->info.gt == 3)
#define IS_HSW_ULT(dev_priv)	(IS_HASWELL(dev_priv) && \
				 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
				 (dev_priv)->info.gt == 3)
#define IS_HSW_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0A0E || \
				 INTEL_DEVID(dev_priv) == 0x0A1E)
#define IS_SKL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x1906 || \
				 INTEL_DEVID(dev_priv) == 0x1913 || \
				 INTEL_DEVID(dev_priv) == 0x1916 || \
				 INTEL_DEVID(dev_priv) == 0x1921 || \
				 INTEL_DEVID(dev_priv) == 0x1926)
#define IS_SKL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x190E || \
				 INTEL_DEVID(dev_priv) == 0x1915 || \
				 INTEL_DEVID(dev_priv) == 0x191E)
#define IS_KBL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x5906 || \
				 INTEL_DEVID(dev_priv) == 0x5913 || \
				 INTEL_DEVID(dev_priv) == 0x5916 || \
				 INTEL_DEVID(dev_priv) == 0x5921 || \
				 INTEL_DEVID(dev_priv) == 0x5926)
#define IS_KBL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x590E || \
				 INTEL_DEVID(dev_priv) == 0x5915 || \
				 INTEL_DEVID(dev_priv) == 0x591E)
#define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 (dev_priv)->info.gt == 2)
#define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 (dev_priv)->info.gt == 3)
#define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 (dev_priv)->info.gt == 4)
#define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 (dev_priv)->info.gt == 2)
#define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 (dev_priv)->info.gt == 3)
#define IS_CFL_ULT(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
#define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 (dev_priv)->info.gt == 2)
#define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 (dev_priv)->info.gt == 3)
#define IS_CNL_WITH_PORT_F(dev_priv)   (IS_CANNONLAKE(dev_priv) && \
					(INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)

#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)

/* Stepping (revision id) tables per platform, for IS_*_REVID() checks. */
#define SKL_REVID_A0		0x0
#define SKL_REVID_B0		0x1
#define SKL_REVID_C0		0x2
#define SKL_REVID_D0		0x3
#define SKL_REVID_E0		0x4
#define SKL_REVID_F0		0x5
#define SKL_REVID_G0		0x6
#define SKL_REVID_H0		0x7

#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))

#define BXT_REVID_A0		0x0
#define BXT_REVID_A1		0x1
#define BXT_REVID_B0		0x3
#define BXT_REVID_B_LAST	0x8
#define BXT_REVID_C0		0x9

#define IS_BXT_REVID(dev_priv, since, until) \
	(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))

#define KBL_REVID_A0		0x0
#define KBL_REVID_B0		0x1
#define KBL_REVID_C0		0x2
#define KBL_REVID_D0		0x3
#define KBL_REVID_E0		0x4

#define IS_KBL_REVID(dev_priv, since, until) \
	(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))

#define GLK_REVID_A0		0x0
#define GLK_REVID_A1		0x1

#define IS_GLK_REVID(dev_priv, since, until) \
	(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))

#define CNL_REVID_A0		0x0
#define CNL_REVID_B0		0x1
#define CNL_REVID_C0		0x2

#define IS_CNL_REVID(p, since, until) \
	(IS_CANNONLAKE(p) && IS_REVID(p, since, until))
2722
2723
2724
2725
2726
2727
2728
2729#define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1)))
2730#define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2)))
2731#define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3)))
2732#define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4)))
2733#define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5)))
2734#define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6)))
2735#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
2736#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
2737#define IS_GEN10(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(9)))
2738#define IS_GEN11(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(10)))
2739
2740#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
2741#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv))
2742#define IS_GEN9_BC(dev_priv) (IS_GEN9(dev_priv) && !IS_LP(dev_priv))
2743
2744#define ENGINE_MASK(id) BIT(id)
2745#define RENDER_RING ENGINE_MASK(RCS)
2746#define BSD_RING ENGINE_MASK(VCS)
2747#define BLT_RING ENGINE_MASK(BCS)
2748#define VEBOX_RING ENGINE_MASK(VECS)
2749#define BSD2_RING ENGINE_MASK(VCS2)
2750#define BSD3_RING ENGINE_MASK(VCS3)
2751#define BSD4_RING ENGINE_MASK(VCS4)
2752#define VEBOX2_RING ENGINE_MASK(VECS2)
2753#define ALL_ENGINES (~0)
2754
2755#define HAS_ENGINE(dev_priv, id) \
2756 (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
2757
2758#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
2759#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
2760#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
2761#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
2762
2763#define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv)
2764
2765#define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc)
2766#define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop)
2767#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
2768#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
2769 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
2770
2771#define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical)
2772
2773#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
2774 ((dev_priv)->info.has_logical_ring_contexts)
2775#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
2776 ((dev_priv)->info.has_logical_ring_elsq)
2777#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
2778 ((dev_priv)->info.has_logical_ring_preemption)
2779
2780#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
2781
2782#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt)
2783#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2)
2784#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3)
2785#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
2786 GEM_BUG_ON((sizes) == 0); \
2787 ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
2788})
2789
#define HAS_OVERLAY(dev_priv)		 ((dev_priv)->info.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
		((dev_priv)->info.overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))

/* Platforms where coarse power gating must be disabled as a workaround. */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
	(IS_CANNONLAKE(dev_priv) || \
	 IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))

/* AUX channel interrupts are assumed available on all supported hardware;
 * GMBUS interrupts exist from gen4 onwards. */
#define HAS_AUX_IRQ(dev_priv)   true
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
					 !(IS_I915G(dev_priv) || \
					 IS_I915GM(dev_priv)))
#define SUPPORTS_TV(dev_priv)		((dev_priv)->info.supports_tv)
#define I915_HAS_HOTPLUG(dev_priv)	((dev_priv)->info.has_hotplug)

#define HAS_FW_BLC(dev_priv) 	(INTEL_GEN(dev_priv) > 2)
#define HAS_FBC(dev_priv)	((dev_priv)->info.has_fbc)
#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)

#define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))

#define HAS_DP_MST(dev_priv)	((dev_priv)->info.has_dp_mst)

#define HAS_DDI(dev_priv)		 ((dev_priv)->info.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
#define HAS_PSR(dev_priv)		 ((dev_priv)->info.has_psr)

#define HAS_RC6(dev_priv)		 ((dev_priv)->info.has_rc6)
#define HAS_RC6p(dev_priv)		 ((dev_priv)->info.has_rc6p)
#define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */

#define HAS_CSR(dev_priv)	((dev_priv)->info.has_csr)

#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)

#define HAS_IPC(dev_priv)		 ((dev_priv)->info.has_ipc)

/*
 * GuC/HuC microcontroller support. Note the distinction between "the
 * hardware has a GuC" (HAS_GUC) and "the driver is actually using it"
 * (USES_GUC*, which consult the module parameters).
 */
#define HAS_GUC(dev_priv)	((dev_priv)->info.has_guc)
#define HAS_GUC_CT(dev_priv)	((dev_priv)->info.has_guc_ct)
#define HAS_GUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))
#define HAS_GUC_SCHED(dev_priv)	(HAS_GUC(dev_priv))

/* For now, anything with a GuC has also HuC */
#define HAS_HUC(dev_priv)	(HAS_GUC(dev_priv))
#define HAS_HUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))

/* Having a GuC is not the same as using a GuC */
#define USES_GUC(dev_priv)		intel_uc_is_using_guc()
#define USES_GUC_SUBMISSION(dev_priv)	intel_uc_is_using_guc_submission()
#define USES_HUC(dev_priv)		intel_uc_is_using_huc()

#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)

#define HAS_POOLED_EU(dev_priv)	((dev_priv)->info.has_pooled_eu)

/* PCI device ids of the various PCH (platform controller hub) variants. */
#define INTEL_PCH_DEVICE_ID_MASK		0xff80
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
#define INTEL_PCH_WPT_DEVICE_ID_TYPE		0x8c80
#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE		0x9c80
#define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
#define INTEL_PCH_KBP_DEVICE_ID_TYPE		0xA280
#define INTEL_PCH_CNP_DEVICE_ID_TYPE		0xA300
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE		0x9D80
#define INTEL_PCH_ICP_DEVICE_ID_TYPE		0x3480
#define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */

#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
#define HAS_PCH_CNP_LP(dev_priv) \
	(INTEL_PCH_ID(dev_priv) == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev_priv) \
	(INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
	 INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
#define HAS_PCH_LPT_H(dev_priv) \
	(INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
	 INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)

#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)

#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
				 2 : HAS_L3_DPF(dev_priv))

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
2918
2919#include "i915_trace.h"
2920
/*
 * Report whether the Intel IOMMU (VT-d) is active for graphics. Always
 * false when the kernel is built without CONFIG_INTEL_IOMMU.
 */
static inline bool intel_vtd_active(void)
{
	bool active = false;

#ifdef CONFIG_INTEL_IOMMU
	active = intel_iommu_gfx_mapped;
#endif
	return active;
}
2929
2930static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
2931{
2932 return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
2933}
2934
2935static inline bool
2936intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
2937{
2938 return IS_BROXTON(dev_priv) && intel_vtd_active();
2939}
2940
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt);

/* Driver-level printk helper; level is a KERN_* string. */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...);

#define i915_report_error(dev_priv, fmt, ...) \
	__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)

#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#else
#define i915_compat_ioctl NULL
#endif
extern const struct dev_pm_ops i915_pm_ops;

/* Driver load/unload and GPU reset entry points. */
extern int i915_driver_load(struct pci_dev *pdev,
			    const struct pci_device_id *ent);
extern void i915_driver_unload(struct drm_device *dev);
extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);

#define I915_RESET_QUIET BIT(0)
extern void i915_reset(struct drm_i915_private *i915, unsigned int flags);
extern int i915_reset_engine(struct intel_engine_cs *engine,
			     unsigned int flags);

extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
extern int intel_reset_guc(struct drm_i915_private *dev_priv);
extern int intel_guc_reset_engine(struct intel_guc *guc,
				  struct intel_engine_cs *engine);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
/* ILK-era power/frequency sampling helpers. */
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);

int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
int intel_engines_init(struct drm_i915_private *dev_priv);

/* Hotplug-detect (HPD) handling. */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
				enum hpd_pin pin);
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
				   enum port port);
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
2998
2999
/* Schedule the next periodic GPU hangcheck, unless disabled by modparam. */
static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
{
	unsigned long delay;

	if (unlikely(!i915_modparams.enable_hangcheck))
		return;

	/*
	 * Round the delay up so that expiring hangcheck timers can coalesce
	 * with other timers and wake the CPU less often (that batching is
	 * the documented purpose of round_jiffies_up_relative()).
	 */
	delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
	queue_delayed_work(system_long_wq,
			   &dev_priv->gpu_error.hangcheck_work, delay);
}
3016
3017__printf(3, 4)
3018void i915_handle_error(struct drm_i915_private *dev_priv,
3019 u32 engine_mask,
3020 const char *fmt, ...);
3021
3022extern void intel_irq_init(struct drm_i915_private *dev_priv);
3023extern void intel_irq_fini(struct drm_i915_private *dev_priv);
3024int intel_irq_install(struct drm_i915_private *dev_priv);
3025void intel_irq_uninstall(struct drm_i915_private *dev_priv);
3026
/* True once the GVT host state (dev_priv->gvt) has been allocated. */
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->gvt;
}

/* True when running as a para-virtualised guest with an active vGPU. */
static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->vgpu.active;
}
3036
3037u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
3038 enum pipe pipe);
3039void
3040i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
3041 u32 status_mask);
3042
3043void
3044i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
3045 u32 status_mask);
3046
3047void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
3048void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
3049void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
3050 uint32_t mask,
3051 uint32_t bits);
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask);
/* Enable (unmask) @bits of the Ironlake display interrupts. */
static inline void
ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
{
	ilk_update_display_irq(dev_priv, bits, bits);
}
/* Disable (mask) @bits of the Ironlake display interrupts. */
static inline void
ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
{
	ilk_update_display_irq(dev_priv, bits, 0);
}
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask);
/* Enable (unmask) @bits of the per-pipe interrupts on Broadwell+. */
static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
				       enum pipe pipe, uint32_t bits)
{
	bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
}
/* Disable (mask) @bits of the per-pipe interrupts on Broadwell+. */
static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
					enum pipe pipe, uint32_t bits)
{
	bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
}
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask);
/* Enable (unmask) @bits of the PCH (south) display interrupts. */
static inline void
ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
{
	ibx_display_interrupt_update(dev_priv, bits, bits);
}
/* Disable (mask) @bits of the PCH (south) display interrupts. */
static inline void
ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
{
	ibx_display_interrupt_update(dev_priv, bits, 0);
}
3092
3093
3094int i915_gem_create_ioctl(struct drm_device *dev, void *data,
3095 struct drm_file *file_priv);
3096int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
3097 struct drm_file *file_priv);
3098int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
3099 struct drm_file *file_priv);
3100int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
3101 struct drm_file *file_priv);
3102int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
3103 struct drm_file *file_priv);
3104int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
3105 struct drm_file *file_priv);
3106int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
3107 struct drm_file *file_priv);
3108int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
3109 struct drm_file *file_priv);
3110int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
3111 struct drm_file *file_priv);
3112int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3113 struct drm_file *file_priv);
3114int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3115 struct drm_file *file);
3116int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3117 struct drm_file *file);
3118int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3119 struct drm_file *file_priv);
3120int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3121 struct drm_file *file_priv);
3122int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
3123 struct drm_file *file_priv);
3124int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
3125 struct drm_file *file_priv);
3126int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
3127void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
3128int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
3129 struct drm_file *file);
3130int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
3131 struct drm_file *file_priv);
3132int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
3133 struct drm_file *file_priv);
3134void i915_gem_sanitize(struct drm_i915_private *i915);
3135int i915_gem_load_init(struct drm_i915_private *dev_priv);
3136void i915_gem_load_cleanup(struct drm_i915_private *dev_priv);
3137void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
3138int i915_gem_freeze(struct drm_i915_private *dev_priv);
3139int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
3140
3141void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
3142void i915_gem_object_free(struct drm_i915_gem_object *obj);
3143void i915_gem_object_init(struct drm_i915_gem_object *obj,
3144 const struct drm_i915_gem_object_ops *ops);
3145struct drm_i915_gem_object *
3146i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size);
3147struct drm_i915_gem_object *
3148i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
3149 const void *data, size_t size);
3150void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
3151void i915_gem_free_object(struct drm_gem_object *obj);
3152
/*
 * Wait for the deferred (RCU callback + worker) free of GEM objects to
 * complete.  Final object release is pushed to mm.free_work, so the
 * memory is not reclaimed immediately after the last reference drops;
 * callers that need it back use this to flush the pending frees.
 */
static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
	/* Cheap early-out: nothing is queued for deferred free. */
	if (!atomic_read(&i915->mm.free_count))
		return;

	/*
	 * rcu_barrier() waits for all in-flight RCU callbacks (which queue
	 * the frees), then flush_work() waits for free_work to run them.
	 * Loop while flush_work() reports it had work to flush, since
	 * freeing objects may itself release more objects down the same
	 * deferred path — NOTE(review): re-arm rationale inferred; confirm.
	 */
	do {
		rcu_barrier();
	} while (flush_work(&i915->mm.free_work));
}
3168
3169static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
3170{
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182 int pass = 2;
3183 do {
3184 rcu_barrier();
3185 drain_workqueue(i915->wq);
3186 } while (--pass);
3187}
3188
3189struct i915_vma * __must_check
3190i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3191 const struct i915_ggtt_view *view,
3192 u64 size,
3193 u64 alignment,
3194 u64 flags);
3195
3196int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
3197void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
3198
3199void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
3200
/* Number of whole pages spanned by a single scatterlist entry. */
static inline int __sg_page_count(const struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}
3205
3206struct scatterlist *
3207i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
3208 unsigned int n, unsigned int *offset);
3209
3210struct page *
3211i915_gem_object_get_page(struct drm_i915_gem_object *obj,
3212 unsigned int n);
3213
3214struct page *
3215i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
3216 unsigned int n);
3217
3218dma_addr_t
3219i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
3220 unsigned long n);
3221
3222void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
3223 struct sg_table *pages,
3224 unsigned int sg_page_sizes);
3225int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
3226
/*
 * Pin the object's backing pages, acquiring them if necessary.
 *
 * Fast path: if the pages are already pinned, take another pin with
 * atomic_inc_not_zero() and return 0 without locking.  Otherwise fall
 * back to __i915_gem_object_get_pages(), which may take obj->mm.lock
 * (hence the might_lock() annotation) and can fail with a negative
 * errno.
 */
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	might_lock(&obj->mm.lock);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}
3237
/* Do we currently hold backing pages (neither NULL nor an error pointer)? */
static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

/* Take an extra pin on pages that are already held (caller owns a pin). */
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

/* Is anybody currently pinning this object's backing pages? */
static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

/* Drop one page pin; actual page release happens elsewhere, later. */
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

/* Public wrapper around __i915_gem_object_unpin_pages(). */
static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}
3272
/*
 * Lock subclasses for obj->mm.lock: the shrinker path uses its own
 * subclass distinct from normal callers — presumably so lockdep can
 * tell nested reclaim acquisitions apart; confirm at the lock sites.
 */
enum i915_mm_subclass {
	I915_MM_NORMAL = 0,
	I915_MM_SHRINKER
};

void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				 enum i915_mm_subclass subclass);
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);

/*
 * Caching mode for a kernel mapping of an object: write-back or
 * write-combining.  The FORCE_* variants OR in I915_MAP_OVERRIDE —
 * NOTE(review): from the names this appears to request the mapping even
 * if an existing one used a different type; confirm in pin_map().
 */
enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
/*
 * i915_gem_object_pin_map() - pin the pages and return a kernel mapping
 * @obj: object to map into kernel address space
 * @type: caching mode of the mapping (see enum i915_map_type)
 *
 * Returns the virtual address of the mapping on success; errors are
 * reported through the returned pointer (__must_check — presumably an
 * ERR_PTR; confirm at the definition).  Pairs with
 * i915_gem_object_unpin_map().
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

/*
 * Release the pin taken by i915_gem_object_pin_map().  This only drops
 * the page pin; whether the kernel mapping itself is torn down
 * immediately is up to the implementation.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
3322
3323int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
3324 unsigned int *needs_clflush);
3325int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
3326 unsigned int *needs_clflush);
3327#define CLFLUSH_BEFORE BIT(0)
3328#define CLFLUSH_AFTER BIT(1)
3329#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
3330
/*
 * Drop the page pin taken by i915_gem_obj_prepare_shmem_read()/write()
 * once CPU access through the shmem backing store is complete.
 */
static inline void
i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
3336
3337int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
3338void i915_vma_move_to_active(struct i915_vma *vma,
3339 struct i915_request *rq,
3340 unsigned int flags);
3341int i915_gem_dumb_create(struct drm_file *file_priv,
3342 struct drm_device *dev,
3343 struct drm_mode_create_dumb *args);
3344int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
3345 uint32_t handle, uint64_t *offset);
3346int i915_gem_mmap_gtt_version(void);
3347
3348void i915_gem_track_fb(struct drm_i915_gem_object *old,
3349 struct drm_i915_gem_object *new,
3350 unsigned frontbuffer_bits);
3351
3352int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
3353
3354struct i915_request *
3355i915_gem_find_active_request(struct intel_engine_cs *engine);
3356
/* A global reset is pending and new work should back off. */
static inline bool i915_reset_backoff(struct i915_gpu_error *error)
{
	return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
}

/* The I915_RESET_HANDOFF flag is set (reset handed over to a waiter). */
static inline bool i915_reset_handoff(struct i915_gpu_error *error)
{
	return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags));
}

/* The GPU is wedged: marked unusable until (if ever) unwedged. */
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return unlikely(test_bit(I915_WEDGED, &error->flags));
}

/*
 * Either a reset is pending or the device is wedged.  The bitwise |
 * evaluates both flag reads unconditionally (no short-circuit branch);
 * the result is the same as || — NOTE(review): presumed intentional.
 */
static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error)
{
	return i915_reset_backoff(error) | i915_terminally_wedged(error);
}

/* Snapshot of the global reset counter (lock-free read). */
static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return READ_ONCE(error->reset_count);
}

/* Snapshot of the per-engine reset counter for @engine (lock-free read). */
static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
					  struct intel_engine_cs *engine)
{
	return READ_ONCE(error->reset_engine_count[engine->id]);
}
3387
3388struct i915_request *
3389i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
3390int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
3391void i915_gem_reset(struct drm_i915_private *dev_priv);
3392void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
3393void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
3394void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
3395bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
3396void i915_gem_reset_engine(struct intel_engine_cs *engine,
3397 struct i915_request *request);
3398
3399void i915_gem_init_mmio(struct drm_i915_private *i915);
3400int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
3401int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
3402void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
3403void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
3404int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
3405 unsigned int flags);
3406int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
3407void i915_gem_resume(struct drm_i915_private *dev_priv);
3408int i915_gem_fault(struct vm_fault *vmf);
3409int i915_gem_object_wait(struct drm_i915_gem_object *obj,
3410 unsigned int flags,
3411 long timeout,
3412 struct intel_rps_client *rps);
3413int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
3414 unsigned int flags,
3415 int priority);
3416#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
3417
3418int __must_check
3419i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
3420int __must_check
3421i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
3422int __must_check
3423i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
3424struct i915_vma * __must_check
3425i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3426 u32 alignment,
3427 const struct i915_ggtt_view *view,
3428 unsigned int flags);
3429void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
3430int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
3431 int align);
3432int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
3433void i915_gem_release(struct drm_device *dev, struct drm_file *file);
3434
3435int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3436 enum i915_cache_level cache_level);
3437
3438struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
3439 struct dma_buf *dma_buf);
3440
3441struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
3442 struct drm_gem_object *gem_obj, int flags);
3443
/*
 * Downcast an address space to the ppGTT that embeds it.  Only valid
 * when @vm really is a ppGTT base (i.e. not the global GTT):
 * container_of() performs no runtime check.
 */
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	return container_of(vm, struct i915_hw_ppgtt, base);
}
3449
3450
3451struct drm_i915_fence_reg *
3452i915_reserve_fence(struct drm_i915_private *dev_priv);
3453void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
3454
3455void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
3456void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
3457
3458void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
3459void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
3460 struct sg_table *pages);
3461void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
3462 struct sg_table *pages);
3463
/*
 * Look up a context by id without taking a reference.  Caller must be
 * inside an RCU read-side section and must not use the context beyond
 * it unless a reference is acquired.
 */
static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
	return idr_find(&file_priv->context_idr, id);
}

/*
 * Look up a context by id and return it with a reference held, or NULL
 * if it does not exist or is concurrently being destroyed
 * (kref_get_unless_zero() fails once the refcount has reached zero).
 */
static inline struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file_priv, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL; /* raced with final unref; treat as not found */
	rcu_read_unlock();

	return ctx;
}

/*
 * Return the per-engine timeline of the address space used by @ctx:
 * its private ppGTT if it has one, otherwise the global GTT.
 */
static inline struct intel_timeline *
i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
				 struct intel_engine_cs *engine)
{
	struct i915_address_space *vm;

	vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
	return &vm->timeline.engine[engine->id];
}
3493
3494int i915_perf_open_ioctl(struct drm_device *dev, void *data,
3495 struct drm_file *file);
3496int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
3497 struct drm_file *file);
3498int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
3499 struct drm_file *file);
3500void i915_oa_init_reg_state(struct intel_engine_cs *engine,
3501 struct i915_gem_context *ctx,
3502 uint32_t *reg_state);
3503
3504
3505int __must_check i915_gem_evict_something(struct i915_address_space *vm,
3506 u64 min_size, u64 alignment,
3507 unsigned cache_level,
3508 u64 start, u64 end,
3509 unsigned flags);
3510int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
3511 struct drm_mm_node *node,
3512 unsigned int flags);
3513int i915_gem_evict_vm(struct i915_address_space *vm);
3514
3515void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);
3516
3517
/*
 * Make prior CPU writes visible to the GPU: wmb() orders/flushes the
 * preceding stores, and on gen < 6 the chipset write buffer must
 * additionally be flushed by hand via intel_gtt_chipset_flush().
 */
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
	wmb();
	if (INTEL_GEN(dev_priv) < 6)
		intel_gtt_chipset_flush();
}
3524
3525
3526int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
3527 struct drm_mm_node *node, u64 size,
3528 unsigned alignment);
3529int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
3530 struct drm_mm_node *node, u64 size,
3531 unsigned alignment, u64 start,
3532 u64 end);
3533void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
3534 struct drm_mm_node *node);
3535int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
3536void i915_gem_cleanup_stolen(struct drm_device *dev);
3537struct drm_i915_gem_object *
3538i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
3539 resource_size_t size);
3540struct drm_i915_gem_object *
3541i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
3542 resource_size_t stolen_offset,
3543 resource_size_t gtt_offset,
3544 resource_size_t size);
3545
3546
3547struct drm_i915_gem_object *
3548i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
3549 phys_addr_t size);
3550
3551
3552unsigned long i915_gem_shrink(struct drm_i915_private *i915,
3553 unsigned long target,
3554 unsigned long *nr_scanned,
3555 unsigned flags);
3556#define I915_SHRINK_PURGEABLE 0x1
3557#define I915_SHRINK_UNBOUND 0x2
3558#define I915_SHRINK_BOUND 0x4
3559#define I915_SHRINK_ACTIVE 0x8
3560#define I915_SHRINK_VMAPS 0x10
3561unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
3562void i915_gem_shrinker_register(struct drm_i915_private *i915);
3563void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
3564
3565
3566
3567static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
3568{
3569 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3570
3571 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
3572 i915_gem_object_is_tiled(obj);
3573}
3574
3575u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
3576 unsigned int tiling, unsigned int stride);
3577u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
3578 unsigned int tiling, unsigned int stride);
3579
3580
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_i915_private *dev_priv);
#else
/* debugfs disabled: registration trivially succeeds, CRC init is a no-op. */
static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#endif
3591
3592
3593#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
3594
3595__printf(2, 3)
3596void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
3597int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
3598 const struct i915_gpu_state *gpu);
3599int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
3600 struct drm_i915_private *i915,
3601 size_t count, loff_t pos);
/* Free the buffer backing an error-state string formatter. */
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}

struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg);

/* Take an additional reference on a captured GPU error state. */
static inline struct i915_gpu_state *
i915_gpu_state_get(struct i915_gpu_state *gpu)
{
	kref_get(&gpu->ref);
	return gpu;
}

void __i915_gpu_state_free(struct kref *kref);
/* Drop a reference; frees the capture on the last put.  NULL is a no-op. */
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
	if (gpu)
		kref_put(&gpu->ref, __i915_gpu_state_free);
}
3626
3627struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
3628void i915_reset_error_state(struct drm_i915_private *i915);
3629
3630#else
3631
/* Error capture compiled out (CONFIG_DRM_I915_CAPTURE_ERROR=n): stubs. */
static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
					    u32 engine_mask,
					    const char *error_msg)
{
}

static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
	return NULL;
}

static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}
3647
3648#endif
3649
3650const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
3651
3652
3653int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
3654void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
3655void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
3656int intel_engine_cmd_parser(struct intel_engine_cs *engine,
3657 struct drm_i915_gem_object *batch_obj,
3658 struct drm_i915_gem_object *shadow_batch_obj,
3659 u32 batch_start_offset,
3660 u32 batch_len,
3661 bool is_master);
3662
3663
3664extern void i915_perf_init(struct drm_i915_private *dev_priv);
3665extern void i915_perf_fini(struct drm_i915_private *dev_priv);
3666extern void i915_perf_register(struct drm_i915_private *dev_priv);
3667extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
3668
3669
3670extern int i915_save_state(struct drm_i915_private *dev_priv);
3671extern int i915_restore_state(struct drm_i915_private *dev_priv);
3672
3673
3674void i915_setup_sysfs(struct drm_i915_private *dev_priv);
3675void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
3676
3677
3678int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
3679void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
3680void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
3681void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
3682 enum pipe pipe, enum port port,
3683 const void *eld, int ls_clock, bool dp_output);
3684
3685
3686extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
3687extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
3688extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
3689 unsigned int pin);
3690extern int intel_gmbus_output_aksv(struct i2c_adapter *adapter);
3691
3692extern struct i2c_adapter *
3693intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
3694extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
3695extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
/* Has this GMBUS adapter been switched to bit-banging (force_bit) mode? */
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
3700extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
3701
3702
3703void intel_bios_init(struct drm_i915_private *dev_priv);
3704void intel_bios_cleanup(struct drm_i915_private *dev_priv);
3705bool intel_bios_is_valid_vbt(const void *buf, size_t size);
3706bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
3707bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
3708bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
3709bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
3710bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
3711bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
3712bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
3713 enum port port);
3714bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
3715 enum port port);
3716
3717
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
/* Without ACPI there is no _DSM handler to (un)register. */
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif
3725
3726
/*
 * Return a writable view of the (normally read-only) device info by
 * casting away the const.  NOTE(review): intended only for the few
 * spots that must adjust capabilities — callers not visible from here;
 * use sparingly.
 */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)&dev_priv->info;
}
3732
3733
3734extern void intel_modeset_init_hw(struct drm_device *dev);
3735extern int intel_modeset_init(struct drm_device *dev);
3736extern void intel_modeset_cleanup(struct drm_device *dev);
3737extern int intel_connector_register(struct drm_connector *);
3738extern void intel_connector_unregister(struct drm_connector *);
3739extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
3740 bool state);
3741extern void intel_display_resume(struct drm_device *dev);
3742extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
3743extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
3744extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
3745extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
3746extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
3747extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
3748 bool enable);
3749
3750int i915_reg_read_ioctl(struct drm_device *dev, void *data,
3751 struct drm_file *file);
3752
3753
3754extern struct intel_overlay_error_state *
3755intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
3756extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
3757 struct intel_overlay_error_state *error);
3758
3759extern struct intel_display_error_state *
3760intel_display_capture_error_state(struct drm_i915_private *dev_priv);
3761extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
3762 struct intel_display_error_state *error);
3763
3764int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
3765int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
3766 u32 val, int fast_timeout_us,
3767 int slow_timeout_ms);
3768#define sandybridge_pcode_write(dev_priv, mbox, val) \
3769 sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500, 0)
3770
3771int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
3772 u32 reply_mask, u32 reply, int timeout_base_ms);
3773
3774
3775u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
3776int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
3777u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
3778u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
3779void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
3780u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
3781void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3782u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
3783void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3784u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
3785void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3786u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
3787void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
3788u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
3789 enum intel_sbi_destination destination);
3790void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
3791 enum intel_sbi_destination destination);
3792u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
3793void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3794
3795
3796void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
3797 enum dpio_phy *phy, enum dpio_channel *ch);
3798void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
3799 enum port port, u32 margin, u32 scale,
3800 u32 enable, u32 deemphasis);
3801void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
3802void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
3803bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
3804 enum dpio_phy phy);
3805bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
3806 enum dpio_phy phy);
3807uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count);
3808void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
3809 uint8_t lane_lat_optim_mask);
3810uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
3811
/* Cherryview (CHV) PHY helpers — implemented in intel_dpio_phy.c. */
void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      const struct intel_crtc_state *crtc_state,
			      bool reset);
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state);
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
void chv_phy_post_pll_disable(struct intel_encoder *encoder,
			      const struct intel_crtc_state *old_crtc_state);
3825
/* Valleyview (VLV) PHY helpers — implemented in intel_dpio_phy.c. */
void vlv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 demph_reg_value, u32 preemph_reg_value,
			      u32 uniqtranscale_reg_value, u32 tx3_demph);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state);
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state);
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
			 const struct intel_crtc_state *old_crtc_state);
3835
/*
 * Conversion between GPU frequency values and the hardware opcode
 * representation (implemented elsewhere in the driver).
 */
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
/*
 * Read an RC6 residency counter register; the result is in nanoseconds
 * (see intel_rc6_residency_us() below, which divides by 1000).
 */
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg);

/* Derive the current actual GPU frequency from an RPSTAT1 snapshot. */
u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
3842
3843static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
3844 const i915_reg_t reg)
3845{
3846 return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
3847}
3848
/*
 * Top-level MMIO register accessors, dispatched through the per-platform
 * uncore vfuncs. Note these are non-hygienic macros: they rely on a local
 * variable named "dev_priv" being in scope at the call site.
 *
 * The trailing boolean argument enables tracing; the _NOTRACE variants
 * pass false to skip it.
 */
#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

/*
 * Read a 64b value split across a pair of 32b registers without tearing:
 * sample upper, then lower, then upper again. If the upper half changed
 * in between, the lower half wrapped between our reads, so retry (at
 * most two extra iterations) until a stable upper half is observed.
 */
#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
	u32 upper, lower, old_upper, loop = 0;				\
	upper = I915_READ(upper_reg);					\
	do {								\
		old_upper = upper;					\
		lower = I915_READ(lower_reg);				\
		upper = I915_READ(upper_reg);				\
	} while (upper != old_upper && loop++ < 2);			\
	(u64)upper << 32 | lower; })

/*
 * Posting reads: read back a register, discarding the value, to flush
 * preceding posted writes out to the device. Untraced.
 */
#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
3890
/*
 * Generators for the raw MMIO accessors __raw_i915_read{8,16,32,64} and
 * __raw_i915_write{8,16,32,64}. These access the register BAR directly,
 * bypassing the uncore vfuncs — so no tracing, and NOTE(review): the
 * caller appears responsible for any forcewake/power handling (cf. the
 * I915_*_FW wrappers below); confirm before using in new code.
 */
#define __raw_read(x, s) \
static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
					     i915_reg_t reg) \
{ \
	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x, s) \
static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
				       i915_reg_t reg, uint##x##_t val) \
{ \
	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

/* The generator macros are not needed beyond this point. */
#undef __raw_read
#undef __raw_write
3916
3917
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
/*
 * "_FW" register accessors: raw, untraced MMIO that assumes the caller
 * has already taken the required forcewake (and power) references.
 * Like I915_READ/I915_WRITE, these expect a local "dev_priv" in scope.
 */
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
3947
3948
/* "Broadcast RGB" quantisation range selection: automatic, full or limited. */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2
3952
3953static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
3954{
3955 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3956 return VLV_VGACNTRL;
3957 else if (INTEL_GEN(dev_priv) >= 5)
3958 return CPU_VGACNTRL;
3959 else
3960 return VGACNTRL;
3961}
3962
3963static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
3964{
3965 unsigned long j = msecs_to_jiffies(m);
3966
3967 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
3968}
3969
/*
 * nsecs_to_jiffies_timeout - ns to jiffies, suitable for a timeout argument
 * @n: duration in nanoseconds
 *
 * As msecs_to_jiffies_timeout(): +1 jiffy so we always wait at least the
 * requested duration, clamped to MAX_JIFFY_OFFSET.
 */
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
3979
3980static inline unsigned long
3981timespec_to_jiffies_timeout(const struct timespec *value)
3982{
3983 unsigned long j = timespec_to_jiffies(value);
3984
3985 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
3986}
3987
3988
3989
3990
3991
3992
3993
/*
 * wait_remaining_ms_from_jiffies - sleep until @to_wait_ms after @timestamp_jiffies
 * @timestamp_jiffies: reference point (a past jiffies value)
 * @to_wait_ms: how long after the reference point we must have waited
 *
 * If the deadline has already passed, returns immediately. Uses
 * time_after() so jiffies wraparound is handled correctly.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		/* schedule_timeout_uninterruptible() returns the time left,
		 * so loop until the full delay has elapsed.
		 */
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}
4014
/*
 * __i915_request_irq_complete - lockless check whether @rq has completed
 *
 * Returns true if the request is known complete (fence signaled, or its
 * global seqno has been passed by the engine). May be called from the
 * wakeup path of a waiter, so it must tolerate concurrent retirement.
 */
static inline bool
__i915_request_irq_complete(const struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	u32 seqno;

	/* Note that the engine may have wrapped around the seqno, and
	 * so our request->global_seqno will be ahead of the hardware,
	 * even though it completed the request before wrapping. We catch
	 * this by kicking all the waiters before resetting the seqno
	 * in hardware, and also signal the fence.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
		return true;

	/* The request was dequeued before we were awoken. We check after
	 * inspecting the hw to confirm that this was the same request
	 * that generated the HWS update. The memory barriers within
	 * the request execution are sufficient to ensure that a check
	 * after reading the value from hw matches this request.
	 */
	seqno = i915_request_global_seqno(rq);
	if (!seqno)
		return false;

	/* Before we do the heavier coherent read of the seqno,
	 * check the value (hopefully) in the CPU cacheline.
	 */
	if (__i915_request_completed(rq, seqno))
		return true;

	/* Ensure our read of the seqno is coherent so that we
	 * do not "miss an interrupt" (i.e. if this is the last
	 * request and the seqno write from the GPU is not visible
	 * by the time the interrupt fires, we will see that the
	 * request is incomplete and go back to sleep awaiting
	 * another interrupt that will never come).
	 *
	 * Strictly, we only need to do this once after an interrupt,
	 * but it is easier and safer to do it every time the waiter
	 * is woken.
	 */
	if (engine->irq_seqno_barrier &&
	    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;

		/* The ordering of irq_posted versus applying the barrier
		 * is crucial. The clearing of the current irq_posted must
		 * be visible before we perform the barrier operation,
		 * such that if a subsequent interrupt arrives, irq_posted
		 * is reasserted and our task rewoken (which causes us to
		 * do another __i915_request_irq_complete() immediately
		 * and reapply the barrier). Conversely, if the clear
		 * occurs after the barrier, then an interrupt that arrived
		 * whilst we waited on the barrier would not trigger the
		 * barrier on the next pass, and the seqno read may not
		 * see the update.
		 */
		engine->irq_seqno_barrier(engine);

		/* If we consume the irq, but we are no longer the bottom-half,
		 * the real bottom-half may not have serialised their own
		 * seqno check with the irq-barrier (i.e. may have caught a
		 * stale seqno), so we need to kick them.
		 */
		spin_lock_irq(&b->irq_lock);
		if (b->irq_wait && b->irq_wait->tsk != current)
			/* Note that if the bottom-half is changed as we
			 * are sending the wake-up, the new bottom-half will
			 * be woken by whomever made the change. We only have
			 * to worry about when we steal the irq-posted for
			 * ourself.
			 */
			wake_up_process(b->irq_wait->tsk);
		spin_unlock_irq(&b->irq_lock);

		/* Re-check completion now that the coherent seqno read
		 * has been performed.
		 */
		if (__i915_request_completed(rq, seqno))
			return true;
	}

	return false;
}
4098
void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);

/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
 * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
 * perform the operation. To check beforehand, pass in the parameters to
 * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
 * you only need to pass in the minor offsets, page-aligned pointers are
 * always valid.
 *
 * For just checking for SSE4.1, in the foreknowledge that the future use
 * will be correctly aligned, just use i915_has_memcpy_from_wc().
 */
#define i915_can_memcpy_from_wc(dst, src, len) \
	i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)

#define i915_has_memcpy_from_wc() \
	i915_memcpy_from_wc(NULL, NULL, 0)
4117
4118
/* Map a region of an io_mapping into a userspace VMA (implemented elsewhere). */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap);
4122
4123static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
4124{
4125 if (INTEL_GEN(i915) >= 10)
4126 return CNL_HWS_CSB_WRITE_INDEX;
4127 else
4128 return I915_HWS_CSB_WRITE_INDEX;
4129}
4130
4131#endif
4132