#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include "intel_uncore.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
#include "intel_guc_ct.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_uc_fw.h"
#include "i915_utils.h"
#include "i915_vma.h"

struct guc_preempt_work {
	struct work_struct work;
	struct intel_engine_cs *engine;
};

/*
 * Top level structure of GuC. It handles firmware loading and manages the
 * client pool and doorbells. intel_guc owns an intel_guc_client to replace
 * the legacy ExecList submission.
 */
struct intel_guc {
	struct intel_uc_fw fw;
	struct intel_guc_log log;
	struct intel_guc_ct ct;

	/* Log snapshot if GuC errors during load */
	struct drm_i915_gem_object *load_err_log;

	/* intel_guc_recv interrupt related state */
	spinlock_t irq_lock;
	unsigned int msg_enabled_mask;

	struct {
		bool enabled;
		void (*reset)(struct drm_i915_private *i915);
		void (*enable)(struct drm_i915_private *i915);
		void (*disable)(struct drm_i915_private *i915);
	} interrupts;

	struct i915_vma *ads_vma;
	struct i915_vma *stage_desc_pool;
	void *stage_desc_pool_vaddr;
	struct ida stage_ids;
	struct i915_vma *shared_data;
	void *shared_data_vaddr;

	struct intel_guc_client *execbuf_client;
	struct intel_guc_client *preempt_client;

	struct guc_preempt_work preempt_work[I915_NUM_ENGINES];
	struct workqueue_struct *preempt_wq;

	DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
	/* Cyclic counter mod pagesize */
	u32 db_cacheline;

	/* GuC's FW specific registers used in MMIO send */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/* To serialize the intel_guc_send actions */
	struct mutex send_mutex;

	/* GuC's FW specific send function */
	int (*send)(struct intel_guc *guc, const u32 *data, u32 len,
		    u32 *response_buf, u32 response_buf_size);

	/* GuC's FW specific event handler function */
	void (*handler)(struct intel_guc *guc);

	/* GuC's FW specific notify function */
	void (*notify)(struct intel_guc *guc);
};

static inline int
intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return guc->send(guc, action, len, NULL, 0);
}

static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
			   u32 *response_buf, u32 response_buf_size)
{
	return guc->send(guc, action, len, response_buf, response_buf_size);
}

static inline void intel_guc_notify(struct intel_guc *guc)
{
	guc->notify(guc);
}

static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
	guc->handler(guc);
}
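
/*
 * Illustrative sketch only: a caller builds a fixed-size u32 action array
 * whose first element is an INTEL_GUC_ACTION_* opcode (from intel_guc_fwif.h)
 * followed by its parameters, and hands it to intel_guc_send(). The request
 * below mirrors how HuC authentication is expected to be issued; it is not a
 * new API.
 *
 *	u32 action[] = {
 *		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
 *		rsa_offset
 *	};
 *	int err;
 *
 *	err = intel_guc_send(guc, action, ARRAY_SIZE(action));
 *	if (err)
 *		DRM_ERROR("GuC action failed: %d\n", err);
 */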

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP	0xFEE00000

/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * The GuC does not allow any gfx GGTT address that falls into the range
 * [0, GuC ggtt_pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 * To exclude that range, all gfx objects used by the GuC are allocated with
 * intel_guc_allocate_vma() and pinned with PIN_OFFSET_BIAS set to the GuC
 * ggtt_pin_bias value. Offsets at or above GUC_GGTT_TOP are rejected as well.
 *
 * Return: GGTT offset of the @vma.
 */
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}

void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_init_params(struct intel_guc *guc);
int intel_guc_init_misc(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
void intel_guc_fini_misc(struct intel_guc *guc);
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
		       u32 *response_buf, u32 response_buf_size);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size);
void intel_guc_to_host_event_handler(struct intel_guc *guc);
void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
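
/*
 * Illustrative sketch only: objects shared with the GuC are typically
 * allocated with intel_guc_allocate_vma(), which pins them above the GuC
 * ggtt_pin_bias, so that intel_guc_ggtt_offset() then yields an address the
 * firmware is allowed to use. Error handling is abbreviated.
 *
 *	struct i915_vma *vma;
 *	u32 offset;
 *
 *	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	offset = intel_guc_ggtt_offset(guc, vma);
 */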

static inline bool intel_guc_is_loaded(struct intel_guc *guc)
{
	return intel_uc_fw_is_loaded(&guc->fw);
}

static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	return 0;
}

static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}

static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}
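
/*
 * Illustrative sketch only: callers gate which GuC-to-host message classes
 * intel_guc_to_host_event_handler() will process by toggling bits in
 * msg_enabled_mask. The mask values below are the ones used by the GuC log
 * code elsewhere in i915; treat the exact names as an assumption here.
 *
 *	intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
 *				  INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
 *
 *	... process log events ...
 *
 *	intel_guc_disable_msg(guc, INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
 *				   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
 */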

int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine);

#endif