linux/drivers/hwspinlock/hwspinlock_core.c
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* radix tree tags */
#define HWSPINLOCK_UNUSED       (0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which makes the hwspinlock core code
 * simple and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with the high-density usage this framework requires
 * (a contiguous range of integer keys, beginning with zero, is used as
 * the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is now reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronization.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);


/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved
 *         (if requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption (and possibly
 * interrupts) is disabled, so the caller must not sleep, and is advised to
 * release the hwspinlock as soon as possible. This is required in order to
 * minimize remote cores polling on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if
 * so, whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        int ret;

        BUG_ON(!hwlock);
        BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

        /*
         * This spin_trylock{_irq, _irqsave} serves three purposes:
         *
         * 1. Disable preemption, in order to minimize the period of time
         *    in which the hwspinlock is taken. This is important in order
         *    to minimize the possible polling on the hardware interconnect
         *    by a remote user of this lock.
         * 2. Make the hwspinlock SMP-safe (so we can take it from
         *    additional contexts on the local host).
         * 3. Ensure that in_atomic/might_sleep checks catch potential
         *    problems with hwspinlock usage (e.g. scheduler checks like
         *    'scheduling while atomic' etc.)
         */
        if (mode == HWLOCK_IRQSTATE)
                ret = spin_trylock_irqsave(&hwlock->lock, *flags);
        else if (mode == HWLOCK_IRQ)
                ret = spin_trylock_irq(&hwlock->lock);
        else
                ret = spin_trylock(&hwlock->lock);

        /* is lock already taken by another context on the local cpu? */
        if (!ret)
                return -EBUSY;

        /* try to take the hwspinlock device */
        ret = hwlock->bank->ops->trylock(hwlock);

        /* if hwlock is already taken, undo spin_trylock_* and exit */
        if (!ret) {
                if (mode == HWLOCK_IRQSTATE)
                        spin_unlock_irqrestore(&hwlock->lock, *flags);
                else if (mode == HWLOCK_IRQ)
                        spin_unlock_irq(&hwlock->lock);
                else
                        spin_unlock(&hwlock->lock);

                return -EBUSY;
        }

        /*
         * We can be sure the other core's memory operations
         * are observable to us only _after_ we successfully take
         * the hwspinlock, and we must make sure that subsequent memory
         * operations (both reads and writes) will not be reordered before
         * we actually took the hwspinlock.
         *
         * Note: the implicit memory barrier of the spinlock above is too
         * early, so we need this additional explicit memory barrier.
         */
        mb();

        return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
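
/*
 * Example (an illustrative sketch, not part of this file): drivers normally
 * use the hwspin_trylock*() wrappers from <linux/hwspinlock.h>, which
 * expand to __hwspin_trylock() with the appropriate mode. The lock handle
 * my_hwlock and the shared_counter variable below are hypothetical.
 *
 *      unsigned long flags;
 *
 *      if (hwspin_trylock_irqsave(my_hwlock, &flags) == 0) {
 *              shared_counter++;       // memory shared with a remote core
 *              hwspin_unlock_irqrestore(my_hwlock, &flags);
 *      }
 */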

/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved
 *         (if requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * (and possibly local interrupts, too), so the caller must not sleep,
 * and is advised to release the hwspinlock as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if
 * so, whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
                                        int mode, unsigned long *flags)
{
        int ret;
        unsigned long expire;

        expire = msecs_to_jiffies(to) + jiffies;

        for (;;) {
                /* Try to take the hwspinlock */
                ret = __hwspin_trylock(hwlock, mode, flags);
                if (ret != -EBUSY)
                        break;

                /*
                 * The lock is already taken, let's check if the user wants
                 * us to try again
                 */
                if (time_is_before_eq_jiffies(expire))
                        return -ETIMEDOUT;

                /*
                 * Allow platform-specific relax handlers to prevent
                 * hogging the interconnect (no sleeping, though)
                 */
                if (hwlock->bank->ops->relax)
                        hwlock->bank->ops->relax(hwlock);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
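
/*
 * Example (an illustrative sketch, not part of this file): a typical caller
 * uses the hwspin_lock_timeout*() wrappers from <linux/hwspinlock.h>.
 * Here we busy-wait up to 100 msecs for the hypothetical my_hwlock while
 * local interrupts stay enabled:
 *
 *      int ret;
 *
 *      ret = hwspin_lock_timeout(my_hwlock, 100);
 *      if (ret)
 *              return ret;     // most likely -ETIMEDOUT
 *      shared_counter++;       // access shared state quickly, no sleeping
 *      hwspin_unlock(my_hwlock);
 */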

/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if so, whether their previous state should be restored. It is up to the
 * user to choose the appropriate @mode of operation, exactly the same way
 * users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
        BUG_ON(!hwlock);
        BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

        /*
         * We must make sure that memory operations (both reads and writes),
         * done before unlocking the hwspinlock, will not be reordered
         * after the lock is released.
         *
         * That's the purpose of this explicit memory barrier.
         *
         * Note: the memory barrier induced by the spin_unlock below is too
         * late; the other core is going to access memory soon after it takes
         * the hwspinlock, and by then we want to be sure our memory
         * operations are already observable.
         */
        mb();

        hwlock->bank->ops->unlock(hwlock);

        /* Undo the spin_trylock{_irq, _irqsave} called while locking */
        if (mode == HWLOCK_IRQSTATE)
                spin_unlock_irqrestore(&hwlock->lock, *flags);
        else if (mode == HWLOCK_IRQ)
                spin_unlock_irq(&hwlock->lock);
        else
                spin_unlock(&hwlock->lock);
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);

/**
 * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
 * @hwlock_spec: hwlock specifier as found in the device tree
 *
 * This is a simple translation function, suitable for hwspinlock platform
 * drivers that only have a lock specifier length of 1.
 *
 * Returns a relative index of the lock within a specified bank on success,
 * or -EINVAL on invalid specifier cell count.
 */
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
        if (WARN_ON(hwlock_spec->args_count != 1))
                return -EINVAL;

        return hwlock_spec->args[0];
}
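
/*
 * For reference, a matching device tree fragment might look like this
 * (an illustrative sketch; node names and addresses are hypothetical):
 *
 *      hwlock: hwspinlock@4a0f6000 {
 *              compatible = "ti,omap4-hwspinlock";
 *              reg = <0x4a0f6000 0x1000>;
 *              #hwlock-cells = <1>;
 *      };
 *
 *      client {
 *              hwlocks = <&hwlock 2>;  // one cell: lock index 2 in the bank
 *      };
 */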

/**
 * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the list of values
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the phandle of the
 * hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the
 * hwspinlock device is not yet registered, -EINVAL on an invalid specifier
 * value, or an appropriate error as returned from the OF parsing of the
 * DT client node.
 */
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
        struct of_phandle_args args;
        struct hwspinlock *hwlock;
        struct radix_tree_iter iter;
        void **slot;
        int id;
        int ret;

        ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
                                         &args);
        if (ret)
                return ret;

        /* Find the hwspinlock device: we need its base_id */
        ret = -EPROBE_DEFER;
        rcu_read_lock();
        radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
                hwlock = radix_tree_deref_slot(slot);
                if (unlikely(!hwlock))
                        continue;
                if (radix_tree_deref_retry(hwlock)) {
                        slot = radix_tree_iter_retry(&iter);
                        continue;
                }

                if (hwlock->bank->dev->of_node == args.np) {
                        ret = 0;
                        break;
                }
        }
        rcu_read_unlock();
        if (ret < 0)
                goto out;

        id = of_hwspin_lock_simple_xlate(&args);
        if (id < 0 || id >= hwlock->bank->num_locks) {
                ret = -EINVAL;
                goto out;
        }
        id += hwlock->bank->base_id;

out:
        of_node_put(args.np);
        return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
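
/*
 * Example (an illustrative sketch, not part of this file): a DT client
 * driver would typically combine this with hwspin_lock_request_specific()
 * in its probe path; np is the client's of_node and priv is a hypothetical
 * driver-private structure.
 *
 *      int id = of_hwspin_lock_get_id(np, 0);
 *      if (id < 0)
 *              return id;      // may be -EPROBE_DEFER
 *      priv->hwlock = hwspin_lock_request_specific(id);
 *      if (!priv->hwlock)
 *              return -EBUSY;
 */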

static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
        struct hwspinlock *tmp;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
        if (ret) {
                if (ret == -EEXIST)
                        pr_err("hwspinlock id %d already exists!\n", id);
                goto out;
        }

        /* mark this hwspinlock as available */
        tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

        /* self-sanity check which should never fail */
        WARN_ON(tmp != hwlock);

out:
        mutex_unlock(&hwspinlock_tree_lock);
        /* propagate any insertion error so callers can unwind registration */
        return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
        struct hwspinlock *hwlock = NULL;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        /* make sure the hwspinlock is not in use (tag is set) */
        ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_err("hwspinlock %d still in use (or not present)\n", id);
                goto out;
        }

        hwlock = radix_tree_delete(&hwspinlock_tree, id);
        if (!hwlock) {
                pr_err("failed to delete hwspinlock %d\n", id);
                goto out;
        }

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
                const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
        struct hwspinlock *hwlock;
        int ret = 0, i;

        if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
                                                        !ops->unlock) {
                pr_err("invalid parameters\n");
                return -EINVAL;
        }

        bank->dev = dev;
        bank->ops = ops;
        bank->base_id = base_id;
        bank->num_locks = num_locks;

        for (i = 0; i < num_locks; i++) {
                hwlock = &bank->lock[i];

                spin_lock_init(&hwlock->lock);
                hwlock->bank = bank;

                ret = hwspin_lock_register_single(hwlock, base_id + i);
                if (ret)
                        goto reg_failed;
        }

        return 0;

reg_failed:
        while (--i >= 0)
                hwspin_lock_unregister_single(base_id + i);
        return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
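
/*
 * Example (an illustrative sketch, not part of this file): a minimal
 * platform driver probe registering a bank of 8 locks. The ops structure
 * and the my_hwspinlock_trylock/unlock callbacks are hypothetical; note
 * that struct hwspinlock_device ends with a flexible array of locks, so
 * the allocation must reserve room for all of them.
 *
 *      static const struct hwspinlock_ops my_hwspinlock_ops = {
 *              .trylock = my_hwspinlock_trylock,
 *              .unlock  = my_hwspinlock_unlock,
 *      };
 *
 *      static int my_hwspinlock_probe(struct platform_device *pdev)
 *      {
 *              struct hwspinlock_device *bank;
 *
 *              bank = devm_kzalloc(&pdev->dev, struct_size(bank, lock, 8),
 *                                  GFP_KERNEL);
 *              if (!bank)
 *                      return -ENOMEM;
 *
 *              pm_runtime_enable(&pdev->dev);
 *              return hwspin_lock_register(bank, &pdev->dev,
 *                                          &my_hwspinlock_ops, 0, 8);
 *      }
 */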

/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
        struct hwspinlock *hwlock, *tmp;
        int i;

        for (i = 0; i < bank->num_locks; i++) {
                hwlock = &bank->lock[i];

                tmp = hwspin_lock_unregister_single(bank->base_id + i);
                if (!tmp)
                        return -EBUSY;

                /* self-sanity check that should never fail */
                WARN_ON(tmp != hwlock);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the hwspinlock instance to prepare
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
        struct device *dev = hwlock->bank->dev;
        struct hwspinlock *tmp;
        int ret;

        /* prevent underlying implementation from being removed */
        if (!try_module_get(dev->driver->owner)) {
                dev_err(dev, "%s: can't get owner\n", __func__);
                return -EINVAL;
        }

        /* notify PM core that power is now needed */
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                dev_err(dev, "%s: can't power on device\n", __func__);
                pm_runtime_put_noidle(dev);
                module_put(dev->driver->owner);
                return ret;
        }

        /* mark hwspinlock as used, should not fail */
        tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
                                                        HWSPINLOCK_UNUSED);

        /* self-sanity check that should never fail */
        WARN_ON(tmp != hwlock);

        return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
        if (!hwlock) {
                pr_err("invalid hwlock\n");
                return -EINVAL;
        }

        return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
        struct hwspinlock *hwlock;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        /* look for an unused lock */
        ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
                                                0, 1, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_warn("a free hwspinlock is not available\n");
                hwlock = NULL;
                goto out;
        }

        /* sanity check that should never fail */
        WARN_ON(ret > 1);

        /* mark as used and power up */
        ret = __hwspin_lock_request(hwlock);
        if (ret < 0)
                hwlock = NULL;

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
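
/*
 * Example (an illustrative sketch, not part of this file): dynamically
 * allocating a lock and publishing its id to a remote core; the
 * send_id_to_remote() helper is hypothetical.
 *
 *      struct hwspinlock *hwlock;
 *
 *      hwlock = hwspin_lock_request();
 *      if (!hwlock)
 *              return -EBUSY;
 *      send_id_to_remote(hwspin_lock_get_id(hwlock));
 *
 * and once the lock is no longer needed:
 *
 *      hwspin_lock_free(hwlock);
 */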

/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
        struct hwspinlock *hwlock;
        int ret;

        mutex_lock(&hwspinlock_tree_lock);

        /* make sure this hwspinlock exists */
        hwlock = radix_tree_lookup(&hwspinlock_tree, id);
        if (!hwlock) {
                pr_warn("hwspinlock %u does not exist\n", id);
                goto out;
        }

        /* sanity check (this shouldn't happen) */
        WARN_ON(hwlock_to_id(hwlock) != id);

        /* make sure this hwspinlock is unused */
        ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
        if (ret == 0) {
                pr_warn("hwspinlock %u is already in use\n", id);
                hwlock = NULL;
                goto out;
        }

        /* mark as used and power up */
        ret = __hwspin_lock_request(hwlock);
        if (ret < 0)
                hwlock = NULL;

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
        struct device *dev;
        struct hwspinlock *tmp;
        int ret;

        if (!hwlock) {
                pr_err("invalid hwlock\n");
                return -EINVAL;
        }

        dev = hwlock->bank->dev;
        mutex_lock(&hwspinlock_tree_lock);

        /* make sure the hwspinlock is used */
        ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
                                                        HWSPINLOCK_UNUSED);
        if (ret == 1) {
                dev_err(dev, "%s: hwlock is already free\n", __func__);
                dump_stack();
                ret = -EINVAL;
                goto out;
        }

        /* notify the underlying device that power is not needed */
        ret = pm_runtime_put(dev);
        if (ret < 0)
                goto out;

        /* mark this hwspinlock as available */
        tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
                                                        HWSPINLOCK_UNUSED);

        /* sanity check (this shouldn't happen) */
        WARN_ON(tmp != hwlock);

        module_put(dev->driver->owner);

out:
        mutex_unlock(&hwspinlock_tree_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");