linux/include/linux/hwspinlock.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#ifndef __LINUX_HWSPINLOCK_H
#define __LINUX_HWSPINLOCK_H

#include <linux/err.h>
#include <linux/sched.h>

/* hwspinlock mode argument */
#define HWLOCK_IRQSTATE         0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ              0x02 /* Disable interrupts, don't save state */
#define HWLOCK_RAW              0x03 /* No SW locking; caller must serialize */
#define HWLOCK_IN_ATOMIC        0x04 /* Called while in atomic context */

struct device;
struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;

/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case a board has several hwspinlock devices, a different base id
 * should be used for each device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
        int base_id;
};
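
/*
 * Illustrative example (not part of the original header; names and sizes
 * are hypothetical): a board with two hwspinlock devices of 32 locks each
 * could assign non-overlapping base ids like this:
 *
 *        static struct hwspinlock_pdata bank0_pdata = { .base_id = 0 };
 *        static struct hwspinlock_pdata bank1_pdata = { .base_id = 32 };
 *
 * Lock 5 of the second bank is then system-wide hwspinlock number 37.
 */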

#ifdef CONFIG_HWSPINLOCK

int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
                const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
                                                        unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
                                                     unsigned int id);
int devm_hwspin_lock_unregister(struct device *dev,
                                struct hwspinlock_device *bank);
int devm_hwspin_lock_register(struct device *dev,
                              struct hwspinlock_device *bank,
                              const struct hwspinlock_ops *ops,
                              int base_id, int num_locks);
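
/*
 * Illustrative sketch (hypothetical driver probe; not part of the original
 * header): the devm_ variants tie the lock's lifetime to a device, so no
 * explicit hwspin_lock_free() is needed on the error or remove paths:
 *
 *        static int my_probe(struct platform_device *pdev)
 *        {
 *                struct hwspinlock *hwlock;
 *
 *                hwlock = devm_hwspin_lock_request_specific(&pdev->dev, 1);
 *                if (IS_ERR_OR_NULL(hwlock))
 *                        return -ENODEV;
 *                ... stash hwlock in driver data and use it ...
 *                return 0;
 *        }
 */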

#else /* !CONFIG_HWSPINLOCK */

/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, calling code will still build and run.
 *
 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with
 * which we _do_ want users to fail (no point in registering hwspinlock
 * instances if the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for callers
 * that only NULL-check the result. Callers that do care can check it
 * with IS_ERR().
 */
static inline struct hwspinlock *hwspin_lock_request(void)
{
        return ERR_PTR(-ENODEV);
}

static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
        return ERR_PTR(-ENODEV);
}

static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
        return 0;
}

static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                                        int mode, unsigned long *flags)
{
        return 0;
}

static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        return 0;
}

static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}

static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
        return 0;
}

static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
        return 0;
}

static inline
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
        return 0;
}

static inline
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
        return 0;
}

static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
        return ERR_PTR(-ENODEV);
}

static inline
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
                                                     unsigned int id)
{
        return ERR_PTR(-ENODEV);
}

#endif /* !CONFIG_HWSPINLOCK */

/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (the previous interrupt state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
        return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}
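
/*
 * Illustrative usage sketch (hypothetical caller; not part of the original
 * header): a zero return means the lock is held with interrupts disabled,
 * and must be paired with hwspin_unlock_irqrestore():
 *
 *        unsigned long flags;
 *
 *        if (!hwspin_trylock_irqsave(hwlock, &flags)) {
 *                ... short, non-sleeping critical section ...
 *                hwspin_unlock_irqrestore(hwlock, &flags);
 *        }
 */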

/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
        return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Caution: the caller must protect acquisition of the hardware lock with
 * a mutex or a spinlock to avoid deadlock. In return, since this mode does
 * no software locking of its own, the caller may perform time-consuming
 * or sleepable operations while holding the hardware lock.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
        return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}
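
/*
 * Illustrative sketch of the caution above (the mutex, the polling loop
 * and its interval are hypothetical and simplified): local callers
 * serialize themselves, which is what makes sleeping while holding the
 * hardware lock safe here:
 *
 *        mutex_lock(&my_mutex);
 *        while (hwspin_trylock_raw(hwlock))
 *                usleep_range(10, 20);
 *        ... time-consuming or sleepable operations ...
 *        hwspin_unlock_raw(hwlock);
 *        mutex_unlock(&my_mutex);
 */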

/**
 * hwspin_trylock_in_atomic() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * This function shall be called only from an atomic context.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_in_atomic(struct hwspinlock *hwlock)
{
        return __hwspin_trylock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible. This is required in order to minimize remote cores
 * polling on the hardware interconnect.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
        return __hwspin_trylock(hwlock, 0, NULL);
}
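
/*
 * Illustrative sketch (hypothetical caller): with the default mode,
 * success means preemption stays disabled until the matching unlock:
 *
 *        if (hwspin_trylock(hwlock) == 0) {
 *                ... brief, non-sleeping work ...
 *                hwspin_unlock(hwlock);
 *        }
 */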

/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @flags: a pointer to where the caller's interrupt state will be saved
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (plus previous interrupt state is saved), so the caller must
 * not sleep, and is advised to release the hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
                                unsigned int to, unsigned long *flags)
{
        return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}
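
/*
 * Illustrative sketch (the 50 msecs budget is hypothetical); on failure
 * the return value is typically -ETIMEDOUT or -EINVAL:
 *
 *        unsigned long flags;
 *        int ret;
 *
 *        ret = hwspin_lock_timeout_irqsave(hwlock, 50, &flags);
 *        if (ret)
 *                return ret;
 *        ... interrupts are disabled here; keep this section short ...
 *        hwspin_unlock_irqrestore(hwlock, &flags);
 */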

/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
        return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Caution: the caller must protect acquisition of the hardware lock with
 * a mutex or a spinlock to avoid deadlock. In return, since this mode does
 * no software locking of its own, the caller may perform time-consuming
 * or sleepable operations while holding the hardware lock.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
        return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}

/**
 * hwspin_lock_timeout_in_atomic() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * This function shall be called only from an atomic context and the timeout
 * value shall not exceed a few msecs.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_in_atomic(struct hwspinlock *hwlock, unsigned int to)
{
        return __hwspin_lock_timeout(hwlock, to, HWLOCK_IN_ATOMIC, NULL);
}
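
/*
 * Illustrative sketch (hypothetical caller, e.g. in interrupt context;
 * the 1 msec budget follows the "few msecs" rule above):
 *
 *        if (!hwspin_lock_timeout_in_atomic(hwlock, 1)) {
 *                ... very short critical section ...
 *                hwspin_unlock_in_atomic(hwlock);
 *        }
 */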

/**
 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
{
        return __hwspin_lock_timeout(hwlock, to, 0, NULL);
}
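
/*
 * Illustrative end-to-end sketch (the lock id and timeout are
 * hypothetical): request a lock by its system-wide id, take it with a
 * timeout, and free it when done. Note that the !CONFIG_HWSPINLOCK stubs
 * return ERR_PTR(-ENODEV), so IS_ERR_OR_NULL() covers both configurations:
 *
 *        struct hwspinlock *hwlock;
 *        int ret;
 *
 *        hwlock = hwspin_lock_request_specific(1);
 *        if (IS_ERR_OR_NULL(hwlock))
 *                return -EBUSY;
 *
 *        ret = hwspin_lock_timeout(hwlock, 100);
 *        if (!ret) {
 *                ... access the shared resource ...
 *                hwspin_unlock(hwlock);
 *        }
 *
 *        hwspin_lock_free(hwlock);
 */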

/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: previous caller's interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
                                                        unsigned long *flags)
{
        __hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}

/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. Should be used to undo, e.g.,
 * hwspin_trylock_irq() or hwspin_lock_timeout_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
        __hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}

/**
 * hwspin_unlock_raw() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_raw()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
        __hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}

/**
 * hwspin_unlock_in_atomic() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_in_atomic())
 * before calling this function: it is a bug to call unlock on a @hwlock
 * that is already unlocked.
 */
static inline void hwspin_unlock_in_atomic(struct hwspinlock *hwlock)
{
        __hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);
}

/**
 * hwspin_unlock() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock and re-enable
 * preemption.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
 * this function: it is a bug to call unlock on a @hwlock that is already
 * unlocked.
 */
static inline void hwspin_unlock(struct hwspinlock *hwlock)
{
        __hwspin_unlock(hwlock, 0, NULL);
}

#endif /* __LINUX_HWSPINLOCK_H */