/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include <linux/xarray.h>
#include <linux/delay.h>

#include "intel_uncore.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
#include "intel_guc_ct.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_guc_slpc_types.h"
#include "intel_uc_fw.h"
#include "i915_utils.h"
#include "i915_vma.h"

struct __guc_ads_blob;
/**
 * struct intel_guc - Top level structure of GuC.
 *
 * It handles firmware loading and manages the client pool. intel_guc owns an
 * i915_sched_engine for submission.
 */
struct intel_guc {
        /** @fw: the GuC firmware */
        struct intel_uc_fw fw;
        /** @log: sub-structure containing GuC log related data and objects */
        struct intel_guc_log log;
        /** @ct: the command transport communication channel */
        struct intel_guc_ct ct;
        /** @slpc: sub-structure containing SLPC related data and objects */
        struct intel_guc_slpc slpc;

        /** @sched_engine: Global engine used to submit requests to GuC */
        struct i915_sched_engine *sched_engine;
        /**
         * @stalled_request: if GuC can't process a request for any reason, we
         * save it until GuC restarts processing. No other request can be
         * submitted until the stalled request is processed.
         */
        struct i915_request *stalled_request;
        /**
         * @submission_stall_reason: reason why submission is stalled
         */
        enum {
                STALL_NONE,
                STALL_REGISTER_CONTEXT,
                STALL_MOVE_LRC_TAIL,
                STALL_ADD_REQUEST,
        } submission_stall_reason;

        /* intel_guc_recv interrupt related state */
        /** @irq_lock: protects GuC irq state */
        spinlock_t irq_lock;
        /**
         * @msg_enabled_mask: mask of events that are processed when receiving
         * an INTEL_GUC_ACTION_DEFAULT G2H message.
         */
        unsigned int msg_enabled_mask;

        /**
         * @outstanding_submission_g2h: number of outstanding GuC to Host
         * responses related to GuC submission, used to determine if the GT is
         * idle
         */
        atomic_t outstanding_submission_g2h;

        /** @interrupts: pointers to GuC interrupt-managing functions. */
        struct {
                void (*reset)(struct intel_guc *guc);
                void (*enable)(struct intel_guc *guc);
                void (*disable)(struct intel_guc *guc);
        } interrupts;

        /**
         * @submission_state: sub-structure for submission state protected by
         * a single lock
         */
        struct {
                /**
                 * @lock: protects everything in submission_state, as well as
                 * ce->guc_id.id and ce->guc_id.ref when transitioning in and
                 * out of zero
                 */
                spinlock_t lock;
                /**
                 * @guc_ids: used to allocate new guc_ids, single-lrc
                 */
                struct ida guc_ids;
                /**
                 * @num_guc_ids: number of guc_ids; a selftest feature allows
                 * reducing this number while testing.
                 */
                int num_guc_ids;
                /**
                 * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
                 */
                unsigned long *guc_ids_bitmap;
                /**
                 * @guc_id_list: list of intel_context with valid guc_ids but no
                 * refs
                 */
                struct list_head guc_id_list;
                /**
                 * @destroyed_contexts: list of contexts waiting to be destroyed
                 * (deregistered with the GuC)
                 */
                struct list_head destroyed_contexts;
                /**
                 * @destroyed_worker: worker to deregister contexts; needed
                 * because we must take a GT PM reference, which we can't do
                 * from the destroy function as it might run in an atomic
                 * context (no sleeping)
                 */
                struct work_struct destroyed_worker;
        } submission_state;
 123
 124        /**
 125         * @submission_supported: tracks whether we support GuC submission on
 126         * the current platform
 127         */
 128        bool submission_supported;
 129        /** @submission_selected: tracks whether the user enabled GuC submission */
 130        bool submission_selected;
 131        /**
 132         * @rc_supported: tracks whether we support GuC rc on the current platform
 133         */
 134        bool rc_supported;
 135        /** @rc_selected: tracks whether the user enabled GuC rc */
 136        bool rc_selected;
 137
 138        /** @ads_vma: object allocated to hold the GuC ADS */
 139        struct i915_vma *ads_vma;
 140        /** @ads_blob: contents of the GuC ADS */
 141        struct __guc_ads_blob *ads_blob;
 142        /** @ads_regset_size: size of the save/restore regsets in the ADS */
 143        u32 ads_regset_size;
 144        /** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
 145        u32 ads_golden_ctxt_size;
 146        /** @ads_engine_usage_size: size of engine usage in the ADS */
 147        u32 ads_engine_usage_size;
 148
 149        /** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
 150        struct i915_vma *lrc_desc_pool;
 151        /** @lrc_desc_pool_vaddr: contents of the GuC LRC descriptor pool */
 152        void *lrc_desc_pool_vaddr;
 153
 154        /**
 155         * @context_lookup: used to resolve intel_context from guc_id, if a
 156         * context is present in this structure it is registered with the GuC
 157         */
 158        struct xarray context_lookup;
 159
 160        /** @params: Control params for fw initialization */
 161        u32 params[GUC_CTL_MAX_DWORDS];
 162
 163        /** @send_regs: GuC's FW specific registers used for sending MMIO H2G */
 164        struct {
 165                u32 base;
 166                unsigned int count;
 167                enum forcewake_domains fw_domains;
 168        } send_regs;
 169
 170        /** @notify_reg: register used to send interrupts to the GuC FW */
 171        i915_reg_t notify_reg;
 172
 173        /**
 174         * @mmio_msg: notification bitmask that the GuC writes in one of its
 175         * registers when the CT channel is disabled, to be processed when the
 176         * channel is back up.
 177         */
 178        u32 mmio_msg;
 179
 180        /** @send_mutex: used to serialize the intel_guc_send actions */
 181        struct mutex send_mutex;
 182
 183        /**
 184         * @timestamp: GT timestamp object that stores a copy of the timestamp
 185         * and adjusts it for overflow using a worker.
 186         */
 187        struct {
 188                /**
 189                 * @lock: Lock protecting the below fields and the engine stats.
 190                 */
 191                spinlock_t lock;
 192
 193                /**
 194                 * @gt_stamp: 64 bit extended value of the GT timestamp.
 195                 */
 196                u64 gt_stamp;
 197
 198                /**
 199                 * @ping_delay: Period for polling the GT timestamp for
 200                 * overflow.
 201                 */
 202                unsigned long ping_delay;
 203
 204                /**
 205                 * @work: Periodic work to adjust GT timestamp, engine and
 206                 * context usage for overflows.
 207                 */
 208                struct delayed_work work;
 209
 210                /**
 211                 * @shift: Right shift value for the gpm timestamp
 212                 */
 213                u32 shift;
 214        } timestamp;
 215
 216#ifdef CONFIG_DRM_I915_SELFTEST
 217        /**
 218         * @number_guc_id_stolen: The number of guc_ids that have been stolen
 219         */
 220        int number_guc_id_stolen;
 221#endif
 222};
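
/*
 * A minimal sketch of how the GuC instance is typically reached; this
 * assumes the usual gt->uc.guc embedding in struct intel_gt, which is
 * defined outside this header:
 *
 *      struct intel_guc *guc = &gt->uc.guc;
 *
 *      if (intel_guc_is_ready(guc))
 *              ; // safe to exchange H2G/G2H messages over the CT channel
 */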

static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
        return container_of(log, struct intel_guc, log);
}

static inline int
intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
        return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, 0);
}

static inline int
intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len,
                  u32 g2h_len_dw)
{
        return intel_guc_ct_send(&guc->ct, action, len, NULL, 0,
                                 MAKE_SEND_FLAGS(g2h_len_dw));
}

static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
                           u32 *response_buf, u32 response_buf_size)
{
        return intel_guc_ct_send(&guc->ct, action, len,
                                 response_buf, response_buf_size, 0);
}
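
/*
 * A minimal usage sketch for the blocking send helpers; the action opcode
 * and payload below are illustrative placeholders, not a real GuC action
 * definition:
 *
 *      u32 action[] = { HYPOTHETICAL_GUC_ACTION, param };
 *      u32 response[8];
 *      int ret;
 *
 *      ret = intel_guc_send_and_receive(guc, action, ARRAY_SIZE(action),
 *                                       response, ARRAY_SIZE(response));
 *      if (ret < 0)
 *              return ret; // CT send failed or GuC returned an error
 */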

static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
                                           const u32 *action,
                                           u32 len,
                                           u32 g2h_len_dw,
                                           bool loop)
{
        int err;
        unsigned int sleep_period_ms = 1;
        bool not_atomic = !in_atomic() && !irqs_disabled();

        /*
         * FIXME: Have the caller pass in whether we are in an atomic context,
         * to avoid using in_atomic(). It is likely safe here, as we check for
         * irqs being disabled, which essentially all the spin locks in i915
         * do, but regardless this should be cleaned up.
         */

        /* No sleeping with spin locks, just busy loop */
        might_sleep_if(loop && not_atomic);

retry:
        err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
        if (unlikely(err == -EBUSY && loop)) {
                if (likely(not_atomic)) {
                        if (msleep_interruptible(sleep_period_ms))
                                return -EINTR;
                        sleep_period_ms = sleep_period_ms << 1;
                } else {
                        cpu_relax();
                }
                goto retry;
        }

        return err;
}
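
/*
 * A minimal sketch of the retry behaviour above: with loop == true the
 * helper resends on -EBUSY, sleeping with an exponentially growing period
 * (1ms, 2ms, 4ms, ...) when sleeping is allowed, or spinning with
 * cpu_relax() otherwise. The action array is a hypothetical placeholder:
 *
 *      u32 action[] = { HYPOTHETICAL_GUC_ACTION };
 *      int err;
 *
 *      err = intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action),
 *                                     0, true);
 *      if (err == -EINTR)
 *              ; // interrupted while sleeping between retries
 */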

static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
        intel_guc_ct_event_handler(&guc->ct);
}

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP    0xFEE00000

/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * The GuC does not allow any gfx GGTT address that falls into the range
 * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 * Currently, in order to exclude the [0, ggtt.pin_bias) address space from
 * the GGTT, all gfx objects used by the GuC are allocated with
 * intel_guc_allocate_vma() and pinned with PIN_OFFSET_BIAS along with the
 * value of ggtt.pin_bias.
 *
 * Return: GGTT offset of the @vma.
 */
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
                                        struct i915_vma *vma)
{
        u32 offset = i915_ggtt_offset(vma);

        GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
        GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

        return offset;
}
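
/*
 * A minimal sketch of the expected allocation pattern: objects handed to
 * the GuC come from intel_guc_allocate_vma() (declared below), so their
 * offsets satisfy the checks in intel_guc_ggtt_offset(). The size is an
 * arbitrary illustrative value:
 *
 *      struct i915_vma *vma = intel_guc_allocate_vma(guc, SZ_4K);
 *      u32 offset;
 *
 *      if (IS_ERR(vma))
 *              return PTR_ERR(vma);
 *
 *      offset = intel_guc_ggtt_offset(guc, vma); // >= ggtt.pin_bias
 */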

void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_late(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
                        u32 *response_buf, u32 response_buf_size);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
                                       const u32 *payload, u32 len);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
                                   struct i915_vma **out_vma, void **out_vaddr);

static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
        return intel_uc_fw_is_supported(&guc->fw);
}

static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
        return intel_uc_fw_is_enabled(&guc->fw);
}

static inline bool intel_guc_is_used(struct intel_guc *guc)
{
        GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
        return intel_uc_fw_is_available(&guc->fw);
}

static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
        return intel_uc_fw_is_running(&guc->fw);
}

static inline bool intel_guc_is_ready(struct intel_guc *guc)
{
        return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
}

static inline void intel_guc_reset_interrupts(struct intel_guc *guc)
{
        guc->interrupts.reset(guc);
}

static inline void intel_guc_enable_interrupts(struct intel_guc *guc)
{
        guc->interrupts.enable(guc);
}

static inline void intel_guc_disable_interrupts(struct intel_guc *guc)
{
        guc->interrupts.disable(guc);
}

static inline int intel_guc_sanitize(struct intel_guc *guc)
{
        intel_uc_fw_sanitize(&guc->fw);
        intel_guc_disable_interrupts(guc);
        intel_guc_ct_sanitize(&guc->ct);
        guc->mmio_msg = 0;

        return 0;
}

static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
        spin_lock_irq(&guc->irq_lock);
        guc->msg_enabled_mask |= mask;
        spin_unlock_irq(&guc->irq_lock);
}

static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
        spin_lock_irq(&guc->irq_lock);
        guc->msg_enabled_mask &= ~mask;
        spin_unlock_irq(&guc->irq_lock);
}
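
/*
 * A minimal sketch of the enable/disable pairing; the mask value is an
 * illustrative placeholder, not a real INTEL_GUC_RECV_MSG_* definition:
 *
 *      intel_guc_enable_msg(guc, HYPOTHETICAL_RECV_MSG_MASK);
 *      // ... G2H messages matching the mask are now processed ...
 *      intel_guc_disable_msg(guc, HYPOTHETICAL_RECV_MSG_MASK);
 */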

int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout);

int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
                                          const u32 *msg, u32 len);
int intel_guc_sched_done_process_msg(struct intel_guc *guc,
                                     const u32 *msg, u32 len);
int intel_guc_context_reset_process_msg(struct intel_guc *guc,
                                        const u32 *msg, u32 len);
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
                                         const u32 *msg, u32 len);

void intel_guc_find_hung_context(struct intel_engine_cs *engine);

int intel_guc_global_policies_update(struct intel_guc *guc);

void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);

void intel_guc_submission_reset_prepare(struct intel_guc *guc);
void intel_guc_submission_reset(struct intel_guc *guc, bool stalled);
void intel_guc_submission_reset_finish(struct intel_guc *guc);
void intel_guc_submission_cancel_requests(struct intel_guc *guc);

void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);

void intel_guc_write_barrier(struct intel_guc *guc);

#endif