linux/drivers/gpu/drm/i915/gem/i915_gem_context.h
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__

#include "i915_gem_context_types.h"

#include "gt/intel_context.h"

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_scheduler.h"
#include "intel_device_info.h"

struct drm_device;
struct drm_file;

static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
        return test_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
        GEM_BUG_ON(i915_gem_context_is_closed(ctx));
        set_bit(CONTEXT_CLOSED, &ctx->flags);
}

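/*
 * Note: the UCONTEXT_* bits tested by the helpers below live in
 * ctx->user_flags and are typically controlled by userspace through the
 * context setparam ioctl (I915_CONTEXT_PARAM_NO_ERROR_CAPTURE, _BANNABLE,
 * _RECOVERABLE and _PERSISTENCE), whereas the CONTEXT_* bits live in
 * ctx->flags and are driven by the kernel itself.
 */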
static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
        return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
        set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
        clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
        return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
        set_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
        clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
{
        return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
{
        set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
{
        clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
}

static inline bool i915_gem_context_is_persistent(const struct i915_gem_context *ctx)
{
        return test_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline void i915_gem_context_set_persistence(struct i915_gem_context *ctx)
{
        set_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx)
{
        clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
}

static inline bool
i915_gem_context_user_engines(const struct i915_gem_context *ctx)
{
        return test_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_set_user_engines(struct i915_gem_context *ctx)
{
        set_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

static inline void
i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
{
        clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}

/* i915_gem_context.c */
void i915_gem_init__contexts(struct drm_i915_private *i915);
void i915_gem_driver_release__contexts(struct drm_i915_private *i915);

int i915_gem_context_open(struct drm_i915_private *i915,
                          struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

void i915_gem_context_release(struct kref *ctx_ref);

int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file);
int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file);

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file);

static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
        kref_get(&ctx->ref);
        return ctx;
}

static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
        kref_put(&ctx->ref, i915_gem_context_release);
}

static inline struct i915_address_space *
i915_gem_context_vm(struct i915_gem_context *ctx)
{
        return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
}

static inline struct i915_address_space *
i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
{
        struct i915_address_space *vm;

        rcu_read_lock();
        vm = rcu_dereference(ctx->vm);
        if (!vm)
                vm = &ctx->i915->ggtt.vm;
        vm = i915_vm_get(vm);
        rcu_read_unlock();

        return vm;
}

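/*
 * Usage sketch (illustrative): i915_gem_context_vm() returns a borrowed
 * pointer and requires ctx->mutex to be held, while
 * i915_gem_context_get_vm_rcu() may be called without the mutex and
 * returns a reference that must be dropped with i915_vm_put():
 *
 *	struct i915_address_space *vm;
 *
 *	vm = i915_gem_context_get_vm_rcu(ctx);
 *	... use vm ...
 *	i915_vm_put(vm);
 */
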
static inline struct i915_gem_engines *
i915_gem_context_engines(struct i915_gem_context *ctx)
{
        return rcu_dereference_protected(ctx->engines,
                                         lockdep_is_held(&ctx->engines_mutex));
}

static inline struct i915_gem_engines *
i915_gem_context_lock_engines(struct i915_gem_context *ctx)
        __acquires(&ctx->engines_mutex)
{
        mutex_lock(&ctx->engines_mutex);
        return i915_gem_context_engines(ctx);
}

static inline void
i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
        __releases(&ctx->engines_mutex)
{
        mutex_unlock(&ctx->engines_mutex);
}

static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{
        struct intel_context *ce;

        rcu_read_lock(); {
                struct i915_gem_engines *e = rcu_dereference(ctx->engines);
                if (unlikely(!e)) /* context was closed! */
                        ce = ERR_PTR(-ENOENT);
                else if (likely(idx < e->num_engines && e->engines[idx]))
                        ce = intel_context_get(e->engines[idx]);
                else
                        ce = ERR_PTR(-EINVAL);
        } rcu_read_unlock();

        return ce;
}

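/*
 * Usage sketch (illustrative): the returned intel_context is either an
 * ERR_PTR() or a new reference that must be released with
 * intel_context_put():
 *
 *	struct intel_context *ce;
 *
 *	ce = i915_gem_context_get_engine(ctx, idx);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *	... use ce ...
 *	intel_context_put(ce);
 */
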
static inline void
i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
                           struct i915_gem_engines *engines)
{
        it->engines = engines;
        it->idx = 0;
}

struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);

#define for_each_gem_engine(ce, engines, it) \
        for (i915_gem_engines_iter_init(&(it), (engines)); \
             ((ce) = i915_gem_engines_iter_next(&(it)));)

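/*
 * Usage sketch (illustrative): the usual pattern is to take the
 * engines_mutex with i915_gem_context_lock_engines(), walk the engines
 * array with for_each_gem_engine(), and then drop the mutex again:
 *
 *	struct i915_gem_engines_iter it;
 *	struct intel_context *ce;
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 *		... operate on ce ...
 *	}
 *	i915_gem_context_unlock_engines(ctx);
 */
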
struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);

int i915_gem_user_to_context_sseu(struct intel_gt *gt,
                                  const struct drm_i915_gem_context_param_sseu *user,
                                  struct intel_sseu *context);

#endif /* !__I915_GEM_CONTEXT_H__ */