/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include <linux/xarray.h>
#include <linux/delay.h>

#include "intel_uncore.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
#include "intel_guc_ct.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_guc_slpc_types.h"
#include "intel_uc_fw.h"
#include "i915_utils.h"
#include "i915_vma.h"

struct __guc_ads_blob;

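/*
 * Top level structure of GuC. It handles firmware loading and manages client
 * pool. intel_guc owns an i915_sched_engine for submission.
 */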
struct intel_guc {
	struct intel_uc_fw fw;
	struct intel_guc_log log;
	struct intel_guc_ct ct;
	struct intel_guc_slpc slpc;

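	/* Global engine used to submit requests to GuC */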
	struct i915_sched_engine *sched_engine;
	struct i915_request *stalled_request;

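	/* intel_guc_recv interrupt related state */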
	spinlock_t irq_lock;
	unsigned int msg_enabled_mask;

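	/*
	 * Number of outstanding G2H responses related to GuC submission;
	 * used to check whether the GT is idle.
	 */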
	atomic_t outstanding_submission_g2h;

	struct {
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

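	/*
	 * contexts_lock protects the pool of free guc ids and the list of
	 * guc ids available to be stolen
	 */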
	spinlock_t contexts_lock;
	struct ida guc_ids;
	struct list_head guc_id_list;

	bool submission_supported;
	bool submission_selected;
	bool rc_supported;
	bool rc_selected;

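	/* GuC Additional Data Struct (ADS) allocation and layout sizes */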
	struct i915_vma *ads_vma;
	struct __guc_ads_blob *ads_blob;
	u32 ads_regset_size;
	u32 ads_golden_ctxt_size;

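	/* GuC LRC descriptor pool and its CPU mapping */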
	struct i915_vma *lrc_desc_pool;
	void *lrc_desc_pool_vaddr;

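	/* guc_id to intel_context lookup */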
	struct xarray context_lookup;

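	/* Control params for fw initialization */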
	u32 params[GUC_CTL_MAX_DWORDS];

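	/* GuC's FW specific registers used in MMIO send */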
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

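	/* register used to send interrupts to the GuC FW */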
	i915_reg_t notify_reg;

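	/* Store msg (e.g. log flush) that we see while CTBs are disabled */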
	u32 mmio_msg;

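	/* To serialize the intel_guc_send actions */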
	struct mutex send_mutex;
};

static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}

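/* Blocking send of an action to the GuC over the CT buffer (no response payload) */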
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, 0);
}

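/*
 * Non-blocking send; g2h_len_dw is the size (in dwords) of the G2H response
 * expected back, used to reserve space for it in the G2H channel.
 */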
static
inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len,
			     u32 g2h_len_dw)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0,
				 MAKE_SEND_FLAGS(g2h_len_dw));
}

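/* Blocking send that copies the GuC's response into response_buf */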
static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
			   u32 *response_buf, u32 response_buf_size)
{
	return intel_guc_ct_send(&guc->ct, action, len,
				 response_buf, response_buf_size, 0);
}

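/*
 * Send with retry: while the CT channel is backed up (-EBUSY) and @loop is
 * set, sleep with exponential backoff when allowed, else busy-loop.
 */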
static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
					   const u32 *action,
					   u32 len,
					   u32 g2h_len_dw,
					   bool loop)
{
	int err;
	unsigned int sleep_period_ms = 1;
	bool not_atomic = !in_atomic() && !irqs_disabled();

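	/* No sleeping with spin locks, just busy loop */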
	might_sleep_if(loop && not_atomic);

retry:
	err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
	if (unlikely(err == -EBUSY && loop)) {
		if (likely(not_atomic)) {
			if (msleep_interruptible(sleep_period_ms))
				return -EINTR;
			sleep_period_ms = sleep_period_ms << 1;
		} else {
			cpu_relax();
		}
		goto retry;
	}

	return err;
}

static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
	intel_guc_ct_event_handler(&guc->ct);
}

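/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */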
#define GUC_GGTT_TOP	0xFEE00000

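/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * GuC does not allow any gfx GGTT address that falls into range
 * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 * Currently, in order to exclude [0, ggtt.pin_bias) address space from GGTT,
 * all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
 * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias.
 *
 * Return: GGTT offset of the @vma.
 */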
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}

void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_late(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr);

static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
	return intel_uc_fw_is_supported(&guc->fw);
}

static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
	return intel_uc_fw_is_enabled(&guc->fw);
}

static inline bool intel_guc_is_used(struct intel_guc *guc)
{
	GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
	return intel_uc_fw_is_available(&guc->fw);
}

static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
	return intel_uc_fw_is_running(&guc->fw);
}

static inline bool intel_guc_is_ready(struct intel_guc *guc)
{
	return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
}

static inline void intel_guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc);
}

static inline void intel_guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc);
}

static inline void intel_guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc);
}

static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	intel_guc_disable_interrupts(guc);
	intel_guc_ct_sanitize(&guc->ct);
	guc->mmio_msg = 0;

	return 0;
}

static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}

static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}

int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout);

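/* Handlers for G2H (GuC-to-host) messages received over the CT buffer */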
int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
					  const u32 *msg, u32 len);
int intel_guc_sched_done_process_msg(struct intel_guc *guc,
				     const u32 *msg, u32 len);
int intel_guc_context_reset_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
					 const u32 *msg, u32 len);

void intel_guc_find_hung_context(struct intel_engine_cs *engine);

int intel_guc_global_policies_update(struct intel_guc *guc);

void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);

void intel_guc_submission_reset_prepare(struct intel_guc *guc);
void intel_guc_submission_reset(struct intel_guc *guc, bool stalled);
void intel_guc_submission_reset_finish(struct intel_guc *guc);
void intel_guc_submission_cancel_requests(struct intel_guc *guc);

void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);

#endif