linux/include/linux/hwspinlock.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H

#include <linux/err.h>
#include <linux/sched.h>

/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE 0x01    /* Disable interrupts, save state */
#define HWLOCK_IRQ      0x02    /* Disable interrupts, don't save state */
#define HWLOCK_RAW      0x03    /* Leave interrupts and preemption untouched */

struct device;
struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;

/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlock devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
        int base_id;
};
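
/*
 * Example (illustrative sketch, not taken from mainline): a board with two
 * hwspinlock banks of 32 locks each could hand out non-overlapping ids via
 * platform data. The device name below is hypothetical.
 *
 *      static struct hwspinlock_pdata bank0_pdata = { .base_id = 0 };
 *      static struct hwspinlock_pdata bank1_pdata = { .base_id = 32 };
 *
 *      static struct platform_device bank1_dev = {
 *              .name              = "example-hwspinlock",
 *              .id                = 1,
 *              .dev.platform_data = &bank1_pdata,
 *      };
 */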

#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)

int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
                const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
                                                        unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
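
/*
 * Example (illustrative sketch, not taken from mainline): a typical lock
 * lifecycle from a driver. EXAMPLE_LOCK_ID is a hypothetical, system-wide
 * agreed-upon id; error handling is abbreviated. Note the NULL check: as
 * documented further below, the stub returns ERR_PTR(-ENODEV), which
 * NULL-checking users treat as success.
 *
 *      struct hwspinlock *hwlock;
 *
 *      hwlock = hwspin_lock_request_specific(EXAMPLE_LOCK_ID);
 *      if (!hwlock)
 *              return -EBUSY;
 *
 *      ... take/release the lock with the wrappers defined below ...
 *
 *      hwspin_lock_free(hwlock);
 */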

#else /* !CONFIG_HWSPINLOCK */

/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, users will still build and work correctly.
 *
 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with which
 * we _do_ want users to fail (no point in registering hwspinlock instances if
 * the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
 * users. Others, which care, can still check this with IS_ERR.
 */
static inline struct hwspinlock *hwspin_lock_request(void)
{
        return ERR_PTR(-ENODEV);
}

static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
        return ERR_PTR(-ENODEV);
}

static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
        return 0;
}

static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                                        int mode, unsigned long *flags)
{
        return 0;
}

static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        return 0;
}

static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}

static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
        return 0;
}

static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
        return 0;
}

#endif /* !CONFIG_HWSPINLOCK */

/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (previous interrupt state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
        return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}
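
/*
 * Example (illustrative sketch, not taken from mainline): guarding a short,
 * non-sleeping critical section shared with a remote core; "hwlock" is
 * assumed to have been obtained via hwspin_lock_request_specific().
 *
 *      unsigned long flags;
 *      int ret;
 *
 *      ret = hwspin_trylock_irqsave(hwlock, &flags);
 *      if (ret)
 *              return ret;     (-EBUSY if contended, -EINVAL if invalid)
 *      ... short critical section, no sleeping ...
 *      hwspin_unlock_irqrestore(hwlock, &flags);
 */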

/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
        return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Caution: the raw variant leaves preemption and interrupts untouched, so
 * the caller must serialize the lock acquisition itself with a mutex or
 * spinlock to avoid deadlock; in return, the caller may perform
 * time-consuming or even sleepable operations under the hardware lock.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
        return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}
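
/*
 * Example (illustrative sketch, not taken from mainline): the raw variant
 * paired with a driver-level mutex, as the Caution note above suggests;
 * "example_mutex" is a hypothetical lock owned by the calling driver.
 *
 *      mutex_lock(&example_mutex);
 *      ret = hwspin_trylock_raw(hwlock);
 *      if (!ret) {
 *              ... may perform sleepable work while holding the hw lock ...
 *              hwspin_unlock_raw(hwlock);
 *      }
 *      mutex_unlock(&example_mutex);
 */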

/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
        return __hwspin_trylock(hwlock, 0, NULL);
}

/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (plus previous interrupt state is saved), so the caller must
 * not sleep, and is advised to release the hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
                                unsigned int to, unsigned long *flags)
{
        return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
        return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Caution: the raw variant leaves preemption and interrupts untouched, so
 * the caller must serialize the lock acquisition itself with a mutex or
 * spinlock to avoid deadlock; in return, the caller may perform
 * time-consuming or even sleepable operations under the hardware lock.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
        return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
        return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}
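
/*
 * Example (illustrative sketch, not taken from mainline): busy-wait for up
 * to 100 msecs before giving up; the timeout value is arbitrary.
 *
 *      ret = hwspin_lock_timeout(hwlock, 100);
 *      if (ret)
 *              return ret;     (-ETIMEDOUT if still contended after 100 msecs)
 *      ... critical section, preemption is disabled here ...
 *      hwspin_unlock(hwlock);
 */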

/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: previous caller's interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
                                                        unsigned long *flags)
{
        __hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. Should be used to undo, e.g.,
 * hwspin_trylock_irq() or hwspin_lock_timeout_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
        __hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_unlock_raw() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_raw()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
        __hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}

/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and re-enable
 * preemption.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
        __hwspin_unlock(hwlock, 0, NULL);
}

#endif /* __LINUX_HWSPINLOCK_H */