1
2
3
4
5
6#ifndef _INTEL_GUC_H_
7#define _INTEL_GUC_H_
8
9#include <linux/xarray.h>
10#include <linux/delay.h>
11
12#include "intel_uncore.h"
13#include "intel_guc_fw.h"
14#include "intel_guc_fwif.h"
15#include "intel_guc_ct.h"
16#include "intel_guc_log.h"
17#include "intel_guc_reg.h"
18#include "intel_guc_slpc_types.h"
19#include "intel_uc_fw.h"
20#include "i915_utils.h"
21#include "i915_vma.h"
22
23struct __guc_ads_blob;
24
25
26
27
28
29
30
/**
 * struct intel_guc - Top level structure of GuC.
 *
 * It handles firmware loading and manages client pool. intel_guc owns an
 * i915_sched_engine for submission.
 */
struct intel_guc {
	/** @fw: the GuC firmware */
	struct intel_uc_fw fw;
	/** @log: sub-structure containing GuC log related data and objects */
	struct intel_guc_log log;
	/** @ct: the command transport communication channel */
	struct intel_guc_ct ct;
	/** @slpc: sub-structure containing SLPC related state and objects */
	struct intel_guc_slpc slpc;

	/** @sched_engine: Global engine used to submit requests to GuC */
	struct i915_sched_engine *sched_engine;
	/**
	 * @stalled_request: if GuC can't process a request for any reason, we
	 * save it until GuC restarts processing. No other request can be
	 * submitted until the stalled request is processed.
	 */
	struct i915_request *stalled_request;
	/**
	 * @submission_stall_reason: reason why submission is stalled
	 */
	enum {
		STALL_NONE,
		STALL_REGISTER_CONTEXT,
		STALL_MOVE_LRC_TAIL,
		STALL_ADD_REQUEST,
	} submission_stall_reason;

	/* intel_guc_recv interrupt related state */

	/** @irq_lock: protects GuC irq state */
	spinlock_t irq_lock;
	/**
	 * @msg_enabled_mask: mask of events that are processed when receiving
	 * a GuC-to-host message; updated under @irq_lock.
	 */
	unsigned int msg_enabled_mask;

	/**
	 * @outstanding_submission_g2h: number of outstanding GuC to Host
	 * responses related to GuC submission — incremented on send, decayed
	 * as responses arrive (see intel_guc_wait_for_idle()).
	 */
	atomic_t outstanding_submission_g2h;

	/** @interrupts: pointers to GuC interrupt-managing functions */
	struct {
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

	/**
	 * @submission_state: sub-structure for submission state protected by
	 * a single lock
	 */
	struct {
		/**
		 * @lock: protects everything in submission_state. NOTE(review):
		 * presumably also guards context guc_id transitions — confirm
		 * against the .c side.
		 */
		spinlock_t lock;
		/**
		 * @guc_ids: used to allocate new guc_ids (single-LRC contexts)
		 */
		struct ida guc_ids;
		/**
		 * @num_guc_ids: number of guc_ids that may be allocated; made
		 * adjustable so selftests can shrink the pool.
		 */
		int num_guc_ids;
		/**
		 * @guc_ids_bitmap: used to allocate new guc_ids (multi-LRC
		 * contexts)
		 */
		unsigned long *guc_ids_bitmap;
		/**
		 * @guc_id_list: list of contexts with valid guc_ids but no
		 * outstanding refs; candidates for guc_id stealing.
		 */
		struct list_head guc_id_list;
		/**
		 * @destroyed_contexts: list of contexts waiting to be
		 * deregistered from the GuC and destroyed
		 */
		struct list_head destroyed_contexts;
		/**
		 * @destroyed_worker: worker that drains @destroyed_contexts;
		 * deferred to process context so the destroy path need not
		 * sleep in atomic context.
		 */
		struct work_struct destroyed_worker;
	} submission_state;

	/**
	 * @submission_supported: tracks whether we support GuC submission on
	 * the current platform
	 */
	bool submission_supported;
	/** @submission_selected: tracks whether the user enabled GuC submission */
	bool submission_selected;
	/**
	 * @rc_supported: tracks whether we support GuC rc on the current
	 * platform
	 */
	bool rc_supported;
	/** @rc_selected: tracks whether the user enabled GuC rc */
	bool rc_selected;

	/** @ads_vma: object allocated to hold the GuC ADS */
	struct i915_vma *ads_vma;
	/** @ads_blob: CPU mapping of the GuC ADS contents */
	struct __guc_ads_blob *ads_blob;
	/** @ads_regset_size: size of the save/restore regsets in the ADS */
	u32 ads_regset_size;
	/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
	u32 ads_golden_ctxt_size;
	/** @ads_engine_usage_size: size of engine usage in the ADS */
	u32 ads_engine_usage_size;

	/** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
	struct i915_vma *lrc_desc_pool;
	/** @lrc_desc_pool_vaddr: CPU mapping of the GuC LRC descriptor pool */
	void *lrc_desc_pool_vaddr;

	/**
	 * @context_lookup: used to resolve intel_context from guc_id; if a
	 * context is present in this structure it is registered with the GuC
	 */
	struct xarray context_lookup;

	/** @params: control params passed to the firmware on initialization */
	u32 params[GUC_CTL_MAX_DWORDS];

	/** @send_regs: GuC's FW specific registers used for sending MMIO H2G */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/** @notify_reg: register used to send interrupts to the GuC FW */
	i915_reg_t notify_reg;

	/**
	 * @mmio_msg: notification bitmask stashed from a GuC register while
	 * the CT channel is down, to be processed later (cleared by
	 * intel_guc_sanitize()).
	 */
	u32 mmio_msg;

	/** @send_mutex: used to serialize the intel_guc_send actions */
	struct mutex send_mutex;

	/**
	 * @timestamp: GT timestamp related data
	 */
	struct {
		/**
		 * @lock: lock protecting the below fields
		 */
		spinlock_t lock;

		/**
		 * @gt_stamp: 64 bit extended value of the GT timestamp
		 */
		u64 gt_stamp;

		/**
		 * @ping_delay: period of the @work used to re-sample the GT
		 * timestamp before it can wrap around
		 */
		unsigned long ping_delay;

		/**
		 * @work: periodic work to keep @gt_stamp extended across
		 * hardware counter overflows
		 */
		struct delayed_work work;

		/**
		 * @shift: right shift applied to the raw timestamp value
		 */
		u32 shift;
	} timestamp;

#ifdef CONFIG_DRM_I915_SELFTEST
	/**
	 * @number_guc_id_stolen: the number of guc_ids that have been stolen
	 * (selftest bookkeeping only)
	 */
	int number_guc_id_stolen;
#endif
};
223
224static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
225{
226 return container_of(log, struct intel_guc, log);
227}
228
229static
230inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
231{
232 return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, 0);
233}
234
235static
236inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len,
237 u32 g2h_len_dw)
238{
239 return intel_guc_ct_send(&guc->ct, action, len, NULL, 0,
240 MAKE_SEND_FLAGS(g2h_len_dw));
241}
242
243static inline int
244intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
245 u32 *response_buf, u32 response_buf_size)
246{
247 return intel_guc_ct_send(&guc->ct, action, len,
248 response_buf, response_buf_size, 0);
249}
250
/*
 * Send a non-blocking H2G and, if @loop is set, keep retrying on -EBUSY
 * until the message goes through. Sleeps with exponential backoff when
 * the caller context allows it, otherwise busy-waits with cpu_relax().
 * Returns 0 on success, -EINTR if an interruptible sleep was interrupted,
 * or the intel_guc_send_nb() error otherwise.
 */
static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
					   const u32 *action,
					   u32 len,
					   u32 g2h_len_dw,
					   bool loop)
{
	int err;
	unsigned int sleep_period_ms = 1;
	bool not_atomic = !in_atomic() && !irqs_disabled();

	/*
	 * FIXME: Have caller pass in if we are in an atomic context to avoid
	 * using in_atomic(). It is likely safe here as we check for irqs
	 * disabled which basically all the spin locks in the i915 do but
	 * regardless this should be cleaned up.
	 */

	/* No sleeping with spin locks, just busy loop */
	might_sleep_if(loop && not_atomic);

retry:
	err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
	if (unlikely(err == -EBUSY && loop)) {
		if (likely(not_atomic)) {
			/* Interruptible sleep; back off by doubling the period */
			if (msleep_interruptible(sleep_period_ms))
				return -EINTR;
			sleep_period_ms = sleep_period_ms << 1;
		} else {
			/* Atomic context: cannot sleep, spin politely */
			cpu_relax();
		}
		goto retry;
	}

	return err;
}
286
287static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
288{
289 intel_guc_ct_event_handler(&guc->ct);
290}
291
292
293#define GUC_GGTT_TOP 0xFEE00000
294
295
296
297
298
299
300
301
302
303
304
305
306
307
/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * The checks enforce the GGTT window usable by the GuC: the offset must
 * not fall below the vma's pin bias and the vma must not extend past
 * GUC_GGTT_TOP.
 *
 * Return: GGTT offset of the @vma.
 */
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	/* Bottom of the range is reserved; GuC may not address below the bias */
	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	/* offset + vma->size must not overflow past the GuC-addressable top */
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}
318
319void intel_guc_init_early(struct intel_guc *guc);
320void intel_guc_init_late(struct intel_guc *guc);
321void intel_guc_init_send_regs(struct intel_guc *guc);
322void intel_guc_write_params(struct intel_guc *guc);
323int intel_guc_init(struct intel_guc *guc);
324void intel_guc_fini(struct intel_guc *guc);
325void intel_guc_notify(struct intel_guc *guc);
326int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
327 u32 *response_buf, u32 response_buf_size);
328int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
329 const u32 *payload, u32 len);
330int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
331int intel_guc_suspend(struct intel_guc *guc);
332int intel_guc_resume(struct intel_guc *guc);
333struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
334int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
335 struct i915_vma **out_vma, void **out_vaddr);
336
337static inline bool intel_guc_is_supported(struct intel_guc *guc)
338{
339 return intel_uc_fw_is_supported(&guc->fw);
340}
341
342static inline bool intel_guc_is_wanted(struct intel_guc *guc)
343{
344 return intel_uc_fw_is_enabled(&guc->fw);
345}
346
347static inline bool intel_guc_is_used(struct intel_guc *guc)
348{
349 GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
350 return intel_uc_fw_is_available(&guc->fw);
351}
352
353static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
354{
355 return intel_uc_fw_is_running(&guc->fw);
356}
357
358static inline bool intel_guc_is_ready(struct intel_guc *guc)
359{
360 return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
361}
362
363static inline void intel_guc_reset_interrupts(struct intel_guc *guc)
364{
365 guc->interrupts.reset(guc);
366}
367
368static inline void intel_guc_enable_interrupts(struct intel_guc *guc)
369{
370 guc->interrupts.enable(guc);
371}
372
373static inline void intel_guc_disable_interrupts(struct intel_guc *guc)
374{
375 guc->interrupts.disable(guc);
376}
377
/*
 * Return the GuC software state to a clean post-reset baseline: sanitize
 * the firmware state, quiesce interrupts and the CT channel, and drop any
 * stashed MMIO notification bits. NOTE(review): call order looks
 * deliberate (interrupts off before CT sanitize) — preserve it.
 * Always returns 0.
 */
static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	intel_guc_disable_interrupts(guc);
	intel_guc_ct_sanitize(&guc->ct);
	guc->mmio_msg = 0;

	return 0;
}
387
/*
 * Add @mask to the set of G2H message types that will be processed.
 * The read-modify-write of msg_enabled_mask is done under irq_lock with
 * local interrupts disabled.
 */
static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}
394
/*
 * Remove @mask from the set of G2H message types that will be processed.
 * Counterpart of intel_guc_enable_msg(); same irq_lock protection.
 */
static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}
401
402int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout);
403
404int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
405 const u32 *msg, u32 len);
406int intel_guc_sched_done_process_msg(struct intel_guc *guc,
407 const u32 *msg, u32 len);
408int intel_guc_context_reset_process_msg(struct intel_guc *guc,
409 const u32 *msg, u32 len);
410int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
411 const u32 *msg, u32 len);
412
413void intel_guc_find_hung_context(struct intel_engine_cs *engine);
414
415int intel_guc_global_policies_update(struct intel_guc *guc);
416
417void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);
418
419void intel_guc_submission_reset_prepare(struct intel_guc *guc);
420void intel_guc_submission_reset(struct intel_guc *guc, bool stalled);
421void intel_guc_submission_reset_finish(struct intel_guc *guc);
422void intel_guc_submission_cancel_requests(struct intel_guc *guc);
423
424void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);
425
426void intel_guc_write_barrier(struct intel_guc *guc);
427
428#endif
429