linux/drivers/gpu/drm/i915/intel_wakeref.h
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>

struct intel_runtime_pm;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref {
        atomic_t count;
        struct mutex mutex;
        intel_wakeref_t wakeref;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
                          struct lock_class_key *key);
#define intel_wakeref_init(wf) do {                                     \
        static struct lock_class_key __key;                             \
                                                                        \
        __intel_wakeref_init((wf), &__key);                             \
} while (0)
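
/*
 * Usage sketch (illustrative; "foo" is a placeholder for the embedding
 * struct): intel_wakeref_init() wraps __intel_wakeref_init() so that each
 * call site gets its own static lock_class_key, keeping the lockdep
 * classes of different wakerefs distinct.
 *
 *      intel_wakeref_init(&foo->wf);
 */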

int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
                              struct intel_wakeref *wf,
                              int (*fn)(struct intel_wakeref *wf));
int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
                             struct intel_wakeref *wf,
                             int (*fn)(struct intel_wakeref *wf));

/**
 * intel_wakeref_get: Acquire the wakeref
 * @rpm: the intel_runtime_pm
 * @wf: the wakeref
 * @fn: callback for acquiring the wakeref, called only on first acquire
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime-pm wakeref and then call @fn underneath the wakeref
 * mutex.
 *
 * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
 * will be released and the acquisition unwound, and an error reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_runtime_pm *rpm,
                  struct intel_wakeref *wf,
                  int (*fn)(struct intel_wakeref *wf))
{
        if (unlikely(!atomic_inc_not_zero(&wf->count)))
                return __intel_wakeref_get_first(rpm, wf, fn);

        return 0;
}
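
/*
 * Usage sketch (illustrative; my_unpark() and "foo" are placeholders, not
 * part of this interface): the first-acquire callback runs underneath
 * wf->mutex with the runtime-pm wakeref already held, and may fail to
 * unwind the acquisition.
 *
 *      static int my_unpark(struct intel_wakeref *wf)
 *      {
 *              ... power up the hardware ...
 *              return 0; (a negative error here unwinds the acquire)
 *      }
 *
 *      err = intel_wakeref_get(rpm, &foo->wf, my_unpark);
 *      if (err)
 *              return err;
 */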

/**
 * intel_wakeref_get_if_active: Acquire the wakeref if already active
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
        return atomic_inc_not_zero(&wf->count);
}
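
/*
 * Usage sketch (illustrative; my_flush(), my_park() and "foo" are
 * placeholders): perform opportunistic work only while the device is
 * already awake, without waking it up.
 *
 *      if (intel_wakeref_get_if_active(&foo->wf)) {
 *              my_flush(foo);
 *              intel_wakeref_put(rpm, &foo->wf, my_park);
 *      }
 */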

/**
 * intel_wakeref_put: Release the wakeref
 * @rpm: the intel_runtime_pm
 * @wf: the wakeref
 * @fn: callback for releasing the wakeref, called only on final release
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime-pm wakeref will be released after the @fn callback is called
 * underneath the wakeref mutex.
 *
 * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
 * is retained and an error reported.
 *
 * Returns: 0 if the wakeref was released successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_put(struct intel_runtime_pm *rpm,
                  struct intel_wakeref *wf,
                  int (*fn)(struct intel_wakeref *wf))
{
        if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
                return __intel_wakeref_put_last(rpm, wf, fn);

        return 0;
}
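
/*
 * Usage sketch (illustrative; my_park() and "foo" are placeholders): every
 * successful intel_wakeref_get() must be balanced by a put; only the final
 * put invokes the callback and drops the runtime-pm wakeref.
 *
 *      static int my_park(struct intel_wakeref *wf)
 *      {
 *              ... idle the hardware ...
 *              return 0; (an error keeps the runtime-pm wakeref held)
 *      }
 *
 *      intel_wakeref_put(rpm, &foo->wf, my_park);
 */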

/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
        __acquires(wf->mutex)
{
        mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
        __releases(wf->mutex)
{
        mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_active(struct intel_wakeref *wf)
{
        return READ_ONCE(wf->wakeref);
}
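
/*
 * Usage sketch (illustrative; "foo" is a placeholder): holding the mutex
 * via intel_wakeref_lock() stabilises the answer, since the wakeref can
 * be neither acquired for the first time nor finally released while the
 * mutex is held.
 *
 *      intel_wakeref_lock(&foo->wf);
 *      if (intel_wakeref_active(&foo->wf))
 *              ... the device stays awake until we unlock ...
 *      intel_wakeref_unlock(&foo->wf);
 */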

struct intel_wakeref_auto {
        struct intel_runtime_pm *rpm;
        struct timer_list timer;
        intel_wakeref_t wakeref;
        spinlock_t lock;
        refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
                             struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
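
/*
 * Usage sketch (illustrative; "foo" and its auto_wf field are
 * placeholders): hold the device awake for at least another second after
 * an event, then let the normal autosuspend logic take over.
 *
 *      intel_wakeref_auto_init(&foo->auto_wf, rpm);
 *      ...
 *      intel_wakeref_auto(&foo->auto_wf, HZ);
 *      ...
 *      intel_wakeref_auto_fini(&foo->auto_wf);
 */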

#endif /* INTEL_WAKEREF_H */