linux/drivers/gpu/drm/i915/gt/intel_context_types.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__

#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active_types.h"
#include "i915_sw_fence.h"
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"

#include "uc/intel_guc_fwif.h"

#define CONTEXT_REDZONE POISON_INUSE
DECLARE_EWMA(runtime, 3, 8);

struct i915_gem_context;
struct i915_gem_ww_ctx;
struct i915_vma;
struct intel_breadcrumbs;
struct intel_context;
struct intel_ring;
struct intel_context_ops {
	unsigned long flags;
#define COPS_HAS_INFLIGHT_BIT 0
#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)

	int (*alloc)(struct intel_context *ce);

	void (*ban)(struct intel_context *ce, struct i915_request *rq);

	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
	int (*pin)(struct intel_context *ce, void *vaddr);
	void (*unpin)(struct intel_context *ce);
	void (*post_unpin)(struct intel_context *ce);

	void (*cancel_request)(struct intel_context *ce,
			       struct i915_request *rq);

	void (*enter)(struct intel_context *ce);
	void (*exit)(struct intel_context *ce);

	void (*sched_disable)(struct intel_context *ce);

	void (*reset)(struct intel_context *ce);
	void (*destroy)(struct kref *kref);

	/* virtual engine/context interface */
	struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
						unsigned int count);
	struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
					       unsigned int sibling);
};
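
/*
 * Hedged sketch of how COPS_HAS_INFLIGHT is meant to be consumed (an
 * illustration, not code from this file): a submission backend that keeps
 * intel_context.inflight up to date advertises it in its ops->flags, so
 * generic code can gate on the flag before trusting the inflight helpers:
 *
 *	if (ce->ops->flags & COPS_HAS_INFLIGHT)
 *		engine = intel_context_inflight(ce);
 */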

struct intel_context {
	/*
	 * Note: Some fields may be accessed under RCU.
	 *
	 * Unless otherwise noted a field can safely be assumed to be protected
	 * by strong reference counting.
	 */
	union {
		struct kref ref; /* no kref_get_unless_zero()! */
		struct rcu_head rcu;
	};
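
	/*
	 * Hedged sketch (illustration only, not code from this file): the
	 * lifetime is managed with plain kref helpers, using the backend's
	 * destroy() hook as the release callback:
	 *
	 *	kref_get(&ce->ref);
	 *	...
	 *	kref_put(&ce->ref, ce->ops->destroy);
	 *
	 * Since @ref shares storage with @rcu, presumably for a deferred
	 * (RCU) free, reviving a zero refcount via kref_get_unless_zero()
	 * is not allowed (see the note above).
	 */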

	struct intel_engine_cs *engine;
	struct intel_engine_cs *inflight;
#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
#define intel_context_inflight(ce) \
	__intel_context_inflight(READ_ONCE((ce)->inflight))
#define intel_context_inflight_count(ce) \
	__intel_context_inflight_count(READ_ONCE((ce)->inflight))
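
	/*
	 * Hedged reading of the helpers above (illustration, not code from
	 * this file): @inflight packs the engine pointer together with a
	 * small submission count in the low three pointer bits, so readers
	 * split it as
	 *
	 *	struct intel_engine_cs *engine = intel_context_inflight(ce);
	 *	unsigned int count = intel_context_inflight_count(ce);
	 *
	 * where a NULL engine means the context is not currently in flight.
	 */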

	struct i915_address_space *vm;
	struct i915_gem_context __rcu *gem_context;

	/*
	 * @signal_lock protects the list of requests that need signaling,
	 * @signals. While there are any requests that need signaling,
	 * we add the context to the breadcrumbs worker, and remove it
	 * upon completion/cancellation of the last request.
	 */
	struct list_head signal_link; /* Accessed under RCU */
	struct list_head signals; /* Guarded by signal_lock */
	spinlock_t signal_lock; /* protects signals, the list of requests */

	struct i915_vma *state;
	u32 ring_size;
	struct intel_ring *ring;
	struct intel_timeline *timeline;

	unsigned long flags;
#define CONTEXT_BARRIER_BIT		0
#define CONTEXT_ALLOC_BIT		1
#define CONTEXT_INIT_BIT		2
#define CONTEXT_VALID_BIT		3
#define CONTEXT_CLOSED_BIT		4
#define CONTEXT_USE_SEMAPHORES		5
#define CONTEXT_BANNED			6
#define CONTEXT_FORCE_SINGLE_SUBMISSION	7
#define CONTEXT_NOPREEMPT		8
#define CONTEXT_LRCA_DIRTY		9
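
	/*
	 * Hedged usage sketch (illustration only): the values above are bit
	 * numbers into @flags and are expected to be used with the atomic
	 * bitops, e.g.
	 *
	 *	set_bit(CONTEXT_BANNED, &ce->flags);
	 *	if (test_bit(CONTEXT_BANNED, &ce->flags))
	 *		...
	 */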

	struct {
		u64 timeout_us;
	} watchdog;

	u32 *lrc_reg_state;
	union {
		struct {
			u32 lrca;
			u32 ccid;
		};
		u64 desc;
	} lrc;
	u32 tag; /* cookie passed to HW to track this context on submission */

	/* Time on GPU as tracked by the hw. */
	struct {
		struct ewma_runtime avg;
		u64 total;
		u32 last;
		I915_SELFTEST_DECLARE(u32 num_underflow);
		I915_SELFTEST_DECLARE(u32 max_underflow);
	} runtime;
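
	/*
	 * Hedged sketch of how the runtime block is fed (illustration only;
	 * read_context_timestamp() below is a hypothetical placeholder):
	 * DECLARE_EWMA(runtime, 3, 8) above generates ewma_runtime_add() and
	 * ewma_runtime_read(), so a new hardware sample would be folded in as
	 *
	 *	u32 sample = read_context_timestamp(ce);
	 *	u32 dt = sample - ce->runtime.last;
	 *
	 *	ce->runtime.last = sample;
	 *	ce->runtime.total += dt;
	 *	ewma_runtime_add(&ce->runtime.avg, dt);
	 */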

	unsigned int active_count; /* protected by timeline->mutex */

	atomic_t pin_count;
	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
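
	/*
	 * Hedged sketch of the pinning discipline (illustration only, not the
	 * definitive pin path): repeat pins only bump @pin_count, while the
	 * first pin takes @pin_mutex to set up the backing state:
	 *
	 *	if (!atomic_inc_not_zero(&ce->pin_count)) {
	 *		mutex_lock(&ce->pin_mutex);
	 *		... allocate and pin state, then raise pin_count ...
	 *		mutex_unlock(&ce->pin_mutex);
	 *	}
	 */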

	/**
	 * active: Active tracker for the rq activity (inc. external) on this
	 * intel_context object.
	 */
	struct i915_active active;

	const struct intel_context_ops *ops;

	/** sseu: Control eu/slice partitioning */
	struct intel_sseu sseu;

	u8 wa_bb_page; /* if set, page num reserved for context workarounds */

	struct {
		/** lock: protects everything in guc_state */
		spinlock_t lock;
		/**
		 * sched_state: scheduling state of this context using GuC
		 * submission
		 */
		u16 sched_state;
		/*
		 * fences: maintains a list of requests that have a submit
		 * fence related to GuC submission
		 */
		struct list_head fences;
	} guc_state;
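
	/*
	 * Hedged locking sketch for the block above (illustration only):
	 * everything in @guc_state is serialized by its spinlock, which is
	 * assumed to be taken irq-safe:
	 *
	 *	unsigned long flags;
	 *
	 *	spin_lock_irqsave(&ce->guc_state.lock, flags);
	 *	... inspect or update guc_state.sched_state / guc_state.fences ...
	 *	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
	 */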

	struct {
		/** lock: protects everything in guc_active */
		spinlock_t lock;
		/** requests: active requests on this context */
		struct list_head requests;
	} guc_active;

	/* GuC scheduling state flags that do not require a lock. */
	atomic_t guc_sched_state_no_lock;

	/* GuC LRC descriptor ID */
	u16 guc_id;

	/* GuC LRC descriptor reference count */
	atomic_t guc_id_ref;

	/*
	 * GuC ID link - in list when unpinned but guc_id still valid in GuC
	 */
	struct list_head guc_id_link;

	/* GuC context blocked fence */
	struct i915_sw_fence guc_blocked;

	/*
	 * GuC priority management
	 */
	u8 guc_prio;
	u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM];
};

#endif /* __INTEL_CONTEXT_TYPES__ */