/* linux/drivers/gpu/drm/i915/intel_guc.h */
   1/*
   2 * Copyright © 2014-2017 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 */
  24
  25#ifndef _INTEL_GUC_H_
  26#define _INTEL_GUC_H_
  27
  28#include "intel_uncore.h"
  29#include "intel_guc_fw.h"
  30#include "intel_guc_fwif.h"
  31#include "intel_guc_ct.h"
  32#include "intel_guc_log.h"
  33#include "intel_guc_reg.h"
  34#include "intel_uc_fw.h"
  35#include "i915_utils.h"
  36#include "i915_vma.h"
  37
/*
 * Per-engine preemption request. intel_guc keeps one of these per engine
 * (see intel_guc.preempt_work[]) and queues @work on intel_guc.preempt_wq
 * to carry out a preemption on @engine.
 */
struct guc_preempt_work {
	struct work_struct work;
	struct intel_engine_cs *engine;
};
  42
/*
 * Top level structure of GuC. It handles firmware loading and manages client
 * pool and doorbells. intel_guc owns a intel_guc_client to replace the legacy
 * ExecList submission.
 */
struct intel_guc {
	/* GuC firmware blob state (fetch/load bookkeeping) */
	struct intel_uc_fw fw;
	/* GuC log buffer management */
	struct intel_guc_log log;
	/* Command Transport buffers (CTB) based communication channel */
	struct intel_guc_ct ct;

	/* Log snapshot if GuC errors during load */
	struct drm_i915_gem_object *load_err_log;

	/* intel_guc_recv interrupt related state */
	spinlock_t irq_lock;
	/* bitmask of GuC-to-host message types currently being handled,
	 * guarded by irq_lock (see intel_guc_enable_msg/disable_msg) */
	unsigned int msg_enabled_mask;

	/*
	 * GuC interrupt management: @enabled tracks whether the interrupts
	 * are currently unmasked; reset/enable/disable are the
	 * platform-specific hook implementations.
	 */
	struct {
		bool enabled;
		void (*reset)(struct drm_i915_private *i915);
		void (*enable)(struct drm_i915_private *i915);
		void (*disable)(struct drm_i915_private *i915);
	} interrupts;

	/* VMA backing the GuC Additional Data Struct (ADS) */
	struct i915_vma *ads_vma;
	/* Pool of stage descriptors plus its kernel mapping and id allocator.
	 * NOTE(review): descriptor layout is defined in intel_guc_fwif.h —
	 * confirm there. */
	struct i915_vma *stage_desc_pool;
	void *stage_desc_pool_vaddr;
	struct ida stage_ids;
	/* Page shared with the GuC firmware, with its kernel mapping.
	 * NOTE(review): exact contents/usage live in the submission code —
	 * confirm against intel_guc_submission.c. */
	struct i915_vma *shared_data;
	void *shared_data_vaddr;

	/* Client used for regular execbuf-based submission */
	struct intel_guc_client *execbuf_client;
	/* Client used to issue preemption requests */
	struct intel_guc_client *preempt_client;

	/* One preemption work item per engine, run on preempt_wq */
	struct guc_preempt_work preempt_work[I915_NUM_ENGINES];
	struct workqueue_struct *preempt_wq;

	/* Allocation state of the GuC doorbells */
	DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
	/* Cyclic counter mod pagesize  */
	u32 db_cacheline;

	/* GuC's FW specific registers used in MMIO send */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/* To serialize the intel_guc_send actions */
	struct mutex send_mutex;

	/* GuC's FW specific send function */
	int (*send)(struct intel_guc *guc, const u32 *data, u32 len,
		    u32 *response_buf, u32 response_buf_size);

	/* GuC's FW specific event handler function */
	void (*handler)(struct intel_guc *guc);

	/* GuC's FW specific notify function */
	void (*notify)(struct intel_guc *guc);
};
 104
 105static
 106inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
 107{
 108        return guc->send(guc, action, len, NULL, 0);
 109}
 110
 111static inline int
 112intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
 113                           u32 *response_buf, u32 response_buf_size)
 114{
 115        return guc->send(guc, action, len, response_buf, response_buf_size);
 116}
 117
 118static inline void intel_guc_notify(struct intel_guc *guc)
 119{
 120        guc->notify(guc);
 121}
 122
 123static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
 124{
 125        guc->handler(guc);
 126}
 127
/*
 * GuC addresses above GUC_GGTT_TOP also don't map through the GTT.
 * Together with the [0, ggtt.pin_bias) exclusion documented at
 * intel_guc_ggtt_offset(), this bounds the GGTT range usable by GuC.
 */
#define GUC_GGTT_TOP    0xFEE00000
 130
 131/**
 132 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 133 * @guc: intel_guc structure.
 134 * @vma: i915 graphics virtual memory area.
 135 *
 136 * GuC does not allow any gfx GGTT address that falls into range
 137 * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 138 * Currently, in order to exclude [0, ggtt.pin_bias) address space from
 139 * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
 140 * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias.
 141 *
 142 * Return: GGTT offset of the @vma.
 143 */
 144static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
 145                                        struct i915_vma *vma)
 146{
 147        u32 offset = i915_ggtt_offset(vma);
 148
 149        GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
 150        GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
 151
 152        return offset;
 153}
 154
 155void intel_guc_init_early(struct intel_guc *guc);
 156void intel_guc_init_send_regs(struct intel_guc *guc);
 157void intel_guc_init_params(struct intel_guc *guc);
 158int intel_guc_init_misc(struct intel_guc *guc);
 159int intel_guc_init(struct intel_guc *guc);
 160void intel_guc_fini(struct intel_guc *guc);
 161void intel_guc_fini_misc(struct intel_guc *guc);
 162int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
 163                       u32 *response_buf, u32 response_buf_size);
 164int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 165                        u32 *response_buf, u32 response_buf_size);
 166void intel_guc_to_host_event_handler(struct intel_guc *guc);
 167void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
 168int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
 169                                       const u32 *payload, u32 len);
 170int intel_guc_sample_forcewake(struct intel_guc *guc);
 171int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
 172int intel_guc_suspend(struct intel_guc *guc);
 173int intel_guc_resume(struct intel_guc *guc);
 174struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
 175
 176static inline bool intel_guc_is_loaded(struct intel_guc *guc)
 177{
 178        return intel_uc_fw_is_loaded(&guc->fw);
 179}
 180
 181static inline int intel_guc_sanitize(struct intel_guc *guc)
 182{
 183        intel_uc_fw_sanitize(&guc->fw);
 184        return 0;
 185}
 186
 187static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
 188{
 189        spin_lock_irq(&guc->irq_lock);
 190        guc->msg_enabled_mask |= mask;
 191        spin_unlock_irq(&guc->irq_lock);
 192}
 193
 194static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
 195{
 196        spin_lock_irq(&guc->irq_lock);
 197        guc->msg_enabled_mask &= ~mask;
 198        spin_unlock_irq(&guc->irq_lock);
 199}
 200
/* Request a GuC-mediated reset of @engine.
 * NOTE(review): implemented outside this header — confirm semantics at the
 * definition site. */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine);

#endif
 205