linux/drivers/base/power/runtime.c
   1/*
   2 * drivers/base/power/runtime.c - Helper functions for device runtime PM
   3 *
   4 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
   5 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
   6 *
   7 * This file is released under the GPLv2.
   8 */
   9
  10#include <linux/sched.h>
  11#include <linux/export.h>
  12#include <linux/pm_runtime.h>
  13#include <trace/events/rpm.h>
  14#include "power.h"
  15
  16static int rpm_resume(struct device *dev, int rpmflags);
  17static int rpm_suspend(struct device *dev, int rpmflags);
  18
  19/**
  20 * update_pm_runtime_accounting - Update the time accounting of power states
  21 * @dev: Device to update the accounting for
  22 *
  23 * In order to be able to have time accounting of the various power states
  24 * (as used by programs such as PowerTOP to show the effectiveness of runtime
  25 * PM), we need to track the time spent in each state.
  26 * update_pm_runtime_accounting must be called each time before the
  27 * runtime_status field is updated, to account the time in the old state
  28 * correctly.
  29 */
  30void update_pm_runtime_accounting(struct device *dev)
  31{
  32        unsigned long now = jiffies;
  33        unsigned long delta;
  34
  35        delta = now - dev->power.accounting_timestamp;
  36
  37        dev->power.accounting_timestamp = now;
  38
  39        if (dev->power.disable_depth > 0)
  40                return;
  41
  42        if (dev->power.runtime_status == RPM_SUSPENDED)
  43                dev->power.suspended_jiffies += delta;
  44        else
  45                dev->power.active_jiffies += delta;
  46}
  47
  48static void __update_runtime_status(struct device *dev, enum rpm_status status)
  49{
  50        update_pm_runtime_accounting(dev);
  51        dev->power.runtime_status = status;
  52}
  53
  54/**
  55 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
  56 * @dev: Device to handle.
  57 */
  58static void pm_runtime_deactivate_timer(struct device *dev)
  59{
  60        if (dev->power.timer_expires > 0) {
  61                del_timer(&dev->power.suspend_timer);
  62                dev->power.timer_expires = 0;
  63        }
  64}
  65
  66/**
  67 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
  68 * @dev: Device to handle.
  69 */
  70static void pm_runtime_cancel_pending(struct device *dev)
  71{
  72        pm_runtime_deactivate_timer(dev);
  73        /*
  74         * In case there's a request pending, make sure its work function will
  75         * return without doing anything.
  76         */
  77        dev->power.request = RPM_REQ_NONE;
  78}
  79
   80/**
  81 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
  82 * @dev: Device to handle.
  83 *
  84 * Compute the autosuspend-delay expiration time based on the device's
  85 * power.last_busy time.  If the delay has already expired or is disabled
  86 * (negative) or the power.use_autosuspend flag isn't set, return 0.
  87 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
  88 *
  89 * This function may be called either with or without dev->power.lock held.
  90 * Either way it can be racy, since power.last_busy may be updated at any time.
  91 */
  92unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
  93{
  94        int autosuspend_delay;
  95        long elapsed;
  96        unsigned long last_busy;
  97        unsigned long expires = 0;
  98
  99        if (!dev->power.use_autosuspend)
 100                goto out;
 101
 102        autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
 103        if (autosuspend_delay < 0)
 104                goto out;
 105
 106        last_busy = ACCESS_ONCE(dev->power.last_busy);
 107        elapsed = jiffies - last_busy;
 108        if (elapsed < 0)
 109                goto out;       /* jiffies has wrapped around. */
 110
 111        /*
 112         * If the autosuspend_delay is >= 1 second, align the timer by rounding
 113         * up to the nearest second.
 114         */
 115        expires = last_busy + msecs_to_jiffies(autosuspend_delay);
 116        if (autosuspend_delay >= 1000)
 117                expires = round_jiffies(expires);
 118        expires += !expires;
 119        if (elapsed >= expires - last_busy)
 120                expires = 0;    /* Already expired. */
 121
 122 out:
 123        return expires;
 124}
 125EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
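
/*
 * Usage sketch (illustrative): a driver that relies on autosuspend
 * typically refreshes power.last_busy when it finishes an I/O request and
 * then drops its usage count with the autosuspend variant of "put", so the
 * expiration time computed above stays in the future while the device is
 * busy.  foo and foo->dev below are hypothetical driver data.
 *
 *	static void foo_io_done(struct foo *foo)
 *	{
 *		pm_runtime_mark_last_busy(foo->dev);
 *		pm_runtime_put_autosuspend(foo->dev);
 *	}
 */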
 126
 127static int dev_memalloc_noio(struct device *dev, void *data)
 128{
 129        return dev->power.memalloc_noio;
 130}
 131
  132/**
 133 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 134 * @dev: Device to handle.
 135 * @enable: True for setting the flag and False for clearing the flag.
 136 *
  137 * Set the flag for all devices in the path from the device to the
  138 * root device in the device tree if @enable is true; otherwise clear
  139 * the flag for the devices in the path whose siblings don't have it set.
  140 *
  141 * The function should only be called by block device or network device
  142 * drivers to solve the deadlock problem that can occur during runtime
  143 * resume/suspend:
  144 *
  145 *     If a memory allocation with GFP_KERNEL is made inside the runtime
  146 *     resume/suspend callback of any one of the device's ancestors (or of
  147 *     the block device itself), a deadlock may be triggered, because the
  148 *     allocation might not complete until the block device becomes active
  149 *     and the involved page I/O finishes.  This situation was first
  150 *     pointed out by Alan Stern.  Network devices are involved in
  151 *     iSCSI-style setups.
  152 *
  153 * dev_hotplug_mutex is held in this function to handle the hotplug
  154 * race, because pm_runtime_set_memalloc_noio() may be called from an
  155 * async probe().
  156 *
  157 * The function should be called between device_add() and device_del()
  158 * on the affected (block/network) device.
 159 */
 160void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
 161{
 162        static DEFINE_MUTEX(dev_hotplug_mutex);
 163
 164        mutex_lock(&dev_hotplug_mutex);
 165        for (;;) {
 166                bool enabled;
 167
 168                /* hold power lock since bitfield is not SMP-safe. */
 169                spin_lock_irq(&dev->power.lock);
 170                enabled = dev->power.memalloc_noio;
 171                dev->power.memalloc_noio = enable;
 172                spin_unlock_irq(&dev->power.lock);
 173
 174                /*
  175                 * No need to set the flag on the ancestors any more
  176                 * if it was already set on this device.
 177                 */
 178                if (enabled && enable)
 179                        break;
 180
 181                dev = dev->parent;
 182
 183                /*
  184                 * Clear the parent's flag only if none of its children
  185                 * has the flag set, because an ancestor's flag may have
  186                 * been set by any one of its descendants.
 187                 */
 188                if (!dev || (!enable &&
 189                             device_for_each_child(dev, NULL,
 190                                                   dev_memalloc_noio)))
 191                        break;
 192        }
 193        mutex_unlock(&dev_hotplug_mutex);
 194}
 195EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
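
/*
 * Usage sketch (illustrative): per the comment above, a block or network
 * device driver would set the flag after registering the device and clear
 * it again before unregistering it.  foo_add()/foo_del() and foo->dev are
 * hypothetical.
 *
 *	static int foo_add(struct foo *foo)
 *	{
 *		int error = device_add(foo->dev);
 *
 *		if (error)
 *			return error;
 *		pm_runtime_set_memalloc_noio(foo->dev, true);
 *		return 0;
 *	}
 *
 *	static void foo_del(struct foo *foo)
 *	{
 *		pm_runtime_set_memalloc_noio(foo->dev, false);
 *		device_del(foo->dev);
 *	}
 */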
 196
 197/**
 198 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 199 * @dev: Device to test.
 200 */
 201static int rpm_check_suspend_allowed(struct device *dev)
 202{
 203        int retval = 0;
 204
 205        if (dev->power.runtime_error)
 206                retval = -EINVAL;
 207        else if (dev->power.disable_depth > 0)
 208                retval = -EACCES;
 209        else if (atomic_read(&dev->power.usage_count) > 0)
 210                retval = -EAGAIN;
 211        else if (!pm_children_suspended(dev))
 212                retval = -EBUSY;
 213
 214        /* Pending resume requests take precedence over suspends. */
 215        else if ((dev->power.deferred_resume
 216                        && dev->power.runtime_status == RPM_SUSPENDING)
 217            || (dev->power.request_pending
 218                        && dev->power.request == RPM_REQ_RESUME))
 219                retval = -EAGAIN;
 220        else if (__dev_pm_qos_read_value(dev) < 0)
 221                retval = -EPERM;
 222        else if (dev->power.runtime_status == RPM_SUSPENDED)
 223                retval = 1;
 224
 225        return retval;
 226}
 227
 228/**
 229 * __rpm_callback - Run a given runtime PM callback for a given device.
 230 * @cb: Runtime PM callback to run.
 231 * @dev: Device to run the callback for.
 232 */
 233static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 234        __releases(&dev->power.lock) __acquires(&dev->power.lock)
 235{
 236        int retval;
 237
 238        if (dev->power.irq_safe)
 239                spin_unlock(&dev->power.lock);
 240        else
 241                spin_unlock_irq(&dev->power.lock);
 242
 243        retval = cb(dev);
 244
 245        if (dev->power.irq_safe)
 246                spin_lock(&dev->power.lock);
 247        else
 248                spin_lock_irq(&dev->power.lock);
 249
 250        return retval;
 251}
 252
 253/**
 254 * rpm_idle - Notify device bus type if the device can be suspended.
 255 * @dev: Device to notify the bus type about.
 256 * @rpmflags: Flag bits.
 257 *
 258 * Check if the device's runtime PM status allows it to be suspended.  If
 259 * another idle notification has been started earlier, return immediately.  If
 260 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 261 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
 262 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 263 *
 264 * This function must be called under dev->power.lock with interrupts disabled.
 265 */
 266static int rpm_idle(struct device *dev, int rpmflags)
 267{
 268        int (*callback)(struct device *);
 269        int retval;
 270
 271        trace_rpm_idle(dev, rpmflags);
 272        retval = rpm_check_suspend_allowed(dev);
 273        if (retval < 0)
 274                ;       /* Conditions are wrong. */
 275
 276        /* Idle notifications are allowed only in the RPM_ACTIVE state. */
 277        else if (dev->power.runtime_status != RPM_ACTIVE)
 278                retval = -EAGAIN;
 279
 280        /*
 281         * Any pending request other than an idle notification takes
 282         * precedence over us, except that the timer may be running.
 283         */
 284        else if (dev->power.request_pending &&
 285            dev->power.request > RPM_REQ_IDLE)
 286                retval = -EAGAIN;
 287
 288        /* Act as though RPM_NOWAIT is always set. */
 289        else if (dev->power.idle_notification)
 290                retval = -EINPROGRESS;
 291        if (retval)
 292                goto out;
 293
 294        /* Pending requests need to be canceled. */
 295        dev->power.request = RPM_REQ_NONE;
 296
 297        if (dev->power.no_callbacks)
 298                goto out;
 299
 300        /* Carry out an asynchronous or a synchronous idle notification. */
 301        if (rpmflags & RPM_ASYNC) {
 302                dev->power.request = RPM_REQ_IDLE;
 303                if (!dev->power.request_pending) {
 304                        dev->power.request_pending = true;
 305                        queue_work(pm_wq, &dev->power.work);
 306                }
 307                trace_rpm_return_int(dev, _THIS_IP_, 0);
 308                return 0;
 309        }
 310
 311        dev->power.idle_notification = true;
 312
 313        if (dev->pm_domain)
 314                callback = dev->pm_domain->ops.runtime_idle;
 315        else if (dev->type && dev->type->pm)
 316                callback = dev->type->pm->runtime_idle;
 317        else if (dev->class && dev->class->pm)
 318                callback = dev->class->pm->runtime_idle;
 319        else if (dev->bus && dev->bus->pm)
 320                callback = dev->bus->pm->runtime_idle;
 321        else
 322                callback = NULL;
 323
 324        if (!callback && dev->driver && dev->driver->pm)
 325                callback = dev->driver->pm->runtime_idle;
 326
 327        if (callback)
 328                retval = __rpm_callback(callback, dev);
 329
 330        dev->power.idle_notification = false;
 331        wake_up_all(&dev->power.wait_queue);
 332
 333 out:
 334        trace_rpm_return_int(dev, _THIS_IP_, retval);
 335        return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 336}
 337
 338/**
 339 * rpm_callback - Run a given runtime PM callback for a given device.
 340 * @cb: Runtime PM callback to run.
 341 * @dev: Device to run the callback for.
 342 */
 343static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 344{
 345        int retval;
 346
 347        if (!cb)
 348                return -ENOSYS;
 349
 350        if (dev->power.memalloc_noio) {
 351                unsigned int noio_flag;
 352
 353                /*
  354                 * A deadlock might occur if a memory allocation with
  355                 * GFP_KERNEL happens inside the runtime_suspend or
  356                 * runtime_resume callback of a block device's ancestor
  357                 * or of the block device itself.  A network device may
  358                 * effectively be part of an iSCSI block device, so the
  359                 * network device and its ancestors should be marked
  360                 * memalloc_noio too.
 361                 */
 362                noio_flag = memalloc_noio_save();
 363                retval = __rpm_callback(cb, dev);
 364                memalloc_noio_restore(noio_flag);
 365        } else {
 366                retval = __rpm_callback(cb, dev);
 367        }
 368
 369        dev->power.runtime_error = retval;
 370        return retval != -EACCES ? retval : -EIO;
 371}
 372
 373/**
 374 * rpm_suspend - Carry out runtime suspend of given device.
 375 * @dev: Device to suspend.
 376 * @rpmflags: Flag bits.
 377 *
 378 * Check if the device's runtime PM status allows it to be suspended.
 379 * Cancel a pending idle notification, autosuspend or suspend. If
 380 * another suspend has been started earlier, either return immediately
 381 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 382 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
 383 * otherwise run the ->runtime_suspend() callback directly. When
 384 * ->runtime_suspend succeeded, if a deferred resume was requested while
 385 * the callback was running then carry it out, otherwise send an idle
 386 * notification for its parent (if the suspend succeeded and both
 387 * ignore_children of parent->power and irq_safe of dev->power are not set).
 388 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 389 * flag is set and the next autosuspend-delay expiration time is in the
 390 * future, schedule another autosuspend attempt.
 391 *
 392 * This function must be called under dev->power.lock with interrupts disabled.
 393 */
 394static int rpm_suspend(struct device *dev, int rpmflags)
 395        __releases(&dev->power.lock) __acquires(&dev->power.lock)
 396{
 397        int (*callback)(struct device *);
 398        struct device *parent = NULL;
 399        int retval;
 400
 401        trace_rpm_suspend(dev, rpmflags);
 402
 403 repeat:
 404        retval = rpm_check_suspend_allowed(dev);
 405
 406        if (retval < 0)
 407                ;       /* Conditions are wrong. */
 408
 409        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
 410        else if (dev->power.runtime_status == RPM_RESUMING &&
 411            !(rpmflags & RPM_ASYNC))
 412                retval = -EAGAIN;
 413        if (retval)
 414                goto out;
 415
 416        /* If the autosuspend_delay time hasn't expired yet, reschedule. */
 417        if ((rpmflags & RPM_AUTO)
 418            && dev->power.runtime_status != RPM_SUSPENDING) {
 419                unsigned long expires = pm_runtime_autosuspend_expiration(dev);
 420
 421                if (expires != 0) {
 422                        /* Pending requests need to be canceled. */
 423                        dev->power.request = RPM_REQ_NONE;
 424
 425                        /*
 426                         * Optimization: If the timer is already running and is
 427                         * set to expire at or before the autosuspend delay,
 428                         * avoid the overhead of resetting it.  Just let it
 429                         * expire; pm_suspend_timer_fn() will take care of the
 430                         * rest.
 431                         */
 432                        if (!(dev->power.timer_expires && time_before_eq(
 433                            dev->power.timer_expires, expires))) {
 434                                dev->power.timer_expires = expires;
 435                                mod_timer(&dev->power.suspend_timer, expires);
 436                        }
 437                        dev->power.timer_autosuspends = 1;
 438                        goto out;
 439                }
 440        }
 441
 442        /* Other scheduled or pending requests need to be canceled. */
 443        pm_runtime_cancel_pending(dev);
 444
 445        if (dev->power.runtime_status == RPM_SUSPENDING) {
 446                DEFINE_WAIT(wait);
 447
 448                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 449                        retval = -EINPROGRESS;
 450                        goto out;
 451                }
 452
 453                if (dev->power.irq_safe) {
 454                        spin_unlock(&dev->power.lock);
 455
 456                        cpu_relax();
 457
 458                        spin_lock(&dev->power.lock);
 459                        goto repeat;
 460                }
 461
 462                /* Wait for the other suspend running in parallel with us. */
 463                for (;;) {
 464                        prepare_to_wait(&dev->power.wait_queue, &wait,
 465                                        TASK_UNINTERRUPTIBLE);
 466                        if (dev->power.runtime_status != RPM_SUSPENDING)
 467                                break;
 468
 469                        spin_unlock_irq(&dev->power.lock);
 470
 471                        schedule();
 472
 473                        spin_lock_irq(&dev->power.lock);
 474                }
 475                finish_wait(&dev->power.wait_queue, &wait);
 476                goto repeat;
 477        }
 478
 479        if (dev->power.no_callbacks)
 480                goto no_callback;       /* Assume success. */
 481
 482        /* Carry out an asynchronous or a synchronous suspend. */
 483        if (rpmflags & RPM_ASYNC) {
 484                dev->power.request = (rpmflags & RPM_AUTO) ?
 485                    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
 486                if (!dev->power.request_pending) {
 487                        dev->power.request_pending = true;
 488                        queue_work(pm_wq, &dev->power.work);
 489                }
 490                goto out;
 491        }
 492
 493        __update_runtime_status(dev, RPM_SUSPENDING);
 494
 495        if (dev->pm_domain)
 496                callback = dev->pm_domain->ops.runtime_suspend;
 497        else if (dev->type && dev->type->pm)
 498                callback = dev->type->pm->runtime_suspend;
 499        else if (dev->class && dev->class->pm)
 500                callback = dev->class->pm->runtime_suspend;
 501        else if (dev->bus && dev->bus->pm)
 502                callback = dev->bus->pm->runtime_suspend;
 503        else
 504                callback = NULL;
 505
 506        if (!callback && dev->driver && dev->driver->pm)
 507                callback = dev->driver->pm->runtime_suspend;
 508
 509        retval = rpm_callback(callback, dev);
 510        if (retval)
 511                goto fail;
 512
 513 no_callback:
 514        __update_runtime_status(dev, RPM_SUSPENDED);
 515        pm_runtime_deactivate_timer(dev);
 516
 517        if (dev->parent) {
 518                parent = dev->parent;
 519                atomic_add_unless(&parent->power.child_count, -1, 0);
 520        }
 521        wake_up_all(&dev->power.wait_queue);
 522
 523        if (dev->power.deferred_resume) {
 524                dev->power.deferred_resume = false;
 525                rpm_resume(dev, 0);
 526                retval = -EAGAIN;
 527                goto out;
 528        }
 529
 530        /* Maybe the parent is now able to suspend. */
 531        if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
 532                spin_unlock(&dev->power.lock);
 533
 534                spin_lock(&parent->power.lock);
 535                rpm_idle(parent, RPM_ASYNC);
 536                spin_unlock(&parent->power.lock);
 537
 538                spin_lock(&dev->power.lock);
 539        }
 540
 541 out:
 542        trace_rpm_return_int(dev, _THIS_IP_, retval);
 543
 544        return retval;
 545
 546 fail:
 547        __update_runtime_status(dev, RPM_ACTIVE);
 548        dev->power.deferred_resume = false;
 549        wake_up_all(&dev->power.wait_queue);
 550
 551        if (retval == -EAGAIN || retval == -EBUSY) {
 552                dev->power.runtime_error = 0;
 553
 554                /*
 555                 * If the callback routine failed an autosuspend, and
 556                 * if the last_busy time has been updated so that there
 557                 * is a new autosuspend expiration time, automatically
 558                 * reschedule another autosuspend.
 559                 */
 560                if ((rpmflags & RPM_AUTO) &&
 561                    pm_runtime_autosuspend_expiration(dev) != 0)
 562                        goto repeat;
 563        } else {
 564                pm_runtime_cancel_pending(dev);
 565        }
 566        goto out;
 567}
 568
 569/**
 570 * rpm_resume - Carry out runtime resume of given device.
 571 * @dev: Device to resume.
 572 * @rpmflags: Flag bits.
 573 *
 574 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 575 * any scheduled or pending requests.  If another resume has been started
 576 * earlier, either return immediately or wait for it to finish, depending on the
 577 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 578 * parallel with this function, either tell the other process to resume after
 579 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 580 * flag is set then queue a resume request; otherwise run the
 581 * ->runtime_resume() callback directly.  Queue an idle notification for the
 582 * device if the resume succeeded.
 583 *
 584 * This function must be called under dev->power.lock with interrupts disabled.
 585 */
 586static int rpm_resume(struct device *dev, int rpmflags)
 587        __releases(&dev->power.lock) __acquires(&dev->power.lock)
 588{
 589        int (*callback)(struct device *);
 590        struct device *parent = NULL;
 591        int retval = 0;
 592
 593        trace_rpm_resume(dev, rpmflags);
 594
 595 repeat:
 596        if (dev->power.runtime_error)
 597                retval = -EINVAL;
 598        else if (dev->power.disable_depth == 1 && dev->power.is_suspended
 599            && dev->power.runtime_status == RPM_ACTIVE)
 600                retval = 1;
 601        else if (dev->power.disable_depth > 0)
 602                retval = -EACCES;
 603        if (retval)
 604                goto out;
 605
 606        /*
 607         * Other scheduled or pending requests need to be canceled.  Small
 608         * optimization: If an autosuspend timer is running, leave it running
 609         * rather than cancelling it now only to restart it again in the near
 610         * future.
 611         */
 612        dev->power.request = RPM_REQ_NONE;
 613        if (!dev->power.timer_autosuspends)
 614                pm_runtime_deactivate_timer(dev);
 615
 616        if (dev->power.runtime_status == RPM_ACTIVE) {
 617                retval = 1;
 618                goto out;
 619        }
 620
 621        if (dev->power.runtime_status == RPM_RESUMING
 622            || dev->power.runtime_status == RPM_SUSPENDING) {
 623                DEFINE_WAIT(wait);
 624
 625                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
 626                        if (dev->power.runtime_status == RPM_SUSPENDING)
 627                                dev->power.deferred_resume = true;
 628                        else
 629                                retval = -EINPROGRESS;
 630                        goto out;
 631                }
 632
 633                if (dev->power.irq_safe) {
 634                        spin_unlock(&dev->power.lock);
 635
 636                        cpu_relax();
 637
 638                        spin_lock(&dev->power.lock);
 639                        goto repeat;
 640                }
 641
 642                /* Wait for the operation carried out in parallel with us. */
 643                for (;;) {
 644                        prepare_to_wait(&dev->power.wait_queue, &wait,
 645                                        TASK_UNINTERRUPTIBLE);
 646                        if (dev->power.runtime_status != RPM_RESUMING
 647                            && dev->power.runtime_status != RPM_SUSPENDING)
 648                                break;
 649
 650                        spin_unlock_irq(&dev->power.lock);
 651
 652                        schedule();
 653
 654                        spin_lock_irq(&dev->power.lock);
 655                }
 656                finish_wait(&dev->power.wait_queue, &wait);
 657                goto repeat;
 658        }
 659
 660        /*
 661         * See if we can skip waking up the parent.  This is safe only if
 662         * power.no_callbacks is set, because otherwise we don't know whether
 663         * the resume will actually succeed.
 664         */
 665        if (dev->power.no_callbacks && !parent && dev->parent) {
 666                spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
 667                if (dev->parent->power.disable_depth > 0
 668                    || dev->parent->power.ignore_children
 669                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
 670                        atomic_inc(&dev->parent->power.child_count);
 671                        spin_unlock(&dev->parent->power.lock);
 672                        retval = 1;
 673                        goto no_callback;       /* Assume success. */
 674                }
 675                spin_unlock(&dev->parent->power.lock);
 676        }
 677
 678        /* Carry out an asynchronous or a synchronous resume. */
 679        if (rpmflags & RPM_ASYNC) {
 680                dev->power.request = RPM_REQ_RESUME;
 681                if (!dev->power.request_pending) {
 682                        dev->power.request_pending = true;
 683                        queue_work(pm_wq, &dev->power.work);
 684                }
 685                retval = 0;
 686                goto out;
 687        }
 688
 689        if (!parent && dev->parent) {
 690                /*
 691                 * Increment the parent's usage counter and resume it if
 692                 * necessary.  Not needed if dev is irq-safe; then the
 693                 * parent is permanently resumed.
 694                 */
 695                parent = dev->parent;
 696                if (dev->power.irq_safe)
 697                        goto skip_parent;
 698                spin_unlock(&dev->power.lock);
 699
 700                pm_runtime_get_noresume(parent);
 701
 702                spin_lock(&parent->power.lock);
 703                /*
 704                 * We can resume if the parent's runtime PM is disabled or it
 705                 * is set to ignore children.
 706                 */
 707                if (!parent->power.disable_depth
 708                    && !parent->power.ignore_children) {
 709                        rpm_resume(parent, 0);
 710                        if (parent->power.runtime_status != RPM_ACTIVE)
 711                                retval = -EBUSY;
 712                }
 713                spin_unlock(&parent->power.lock);
 714
 715                spin_lock(&dev->power.lock);
 716                if (retval)
 717                        goto out;
 718                goto repeat;
 719        }
 720 skip_parent:
 721
 722        if (dev->power.no_callbacks)
 723                goto no_callback;       /* Assume success. */
 724
 725        __update_runtime_status(dev, RPM_RESUMING);
 726
 727        if (dev->pm_domain)
 728                callback = dev->pm_domain->ops.runtime_resume;
 729        else if (dev->type && dev->type->pm)
 730                callback = dev->type->pm->runtime_resume;
 731        else if (dev->class && dev->class->pm)
 732                callback = dev->class->pm->runtime_resume;
 733        else if (dev->bus && dev->bus->pm)
 734                callback = dev->bus->pm->runtime_resume;
 735        else
 736                callback = NULL;
 737
 738        if (!callback && dev->driver && dev->driver->pm)
 739                callback = dev->driver->pm->runtime_resume;
 740
 741        retval = rpm_callback(callback, dev);
 742        if (retval) {
 743                __update_runtime_status(dev, RPM_SUSPENDED);
 744                pm_runtime_cancel_pending(dev);
 745        } else {
 746 no_callback:
 747                __update_runtime_status(dev, RPM_ACTIVE);
 748                if (parent)
 749                        atomic_inc(&parent->power.child_count);
 750        }
 751        wake_up_all(&dev->power.wait_queue);
 752
 753        if (retval >= 0)
 754                rpm_idle(dev, RPM_ASYNC);
 755
 756 out:
 757        if (parent && !dev->power.irq_safe) {
 758                spin_unlock_irq(&dev->power.lock);
 759
 760                pm_runtime_put(parent);
 761
 762                spin_lock_irq(&dev->power.lock);
 763        }
 764
 765        trace_rpm_return_int(dev, _THIS_IP_, retval);
 766
 767        return retval;
 768}
 769
 770/**
 771 * pm_runtime_work - Universal runtime PM work function.
 772 * @work: Work structure used for scheduling the execution of this function.
 773 *
 774 * Use @work to get the device object the work is to be done for, determine what
 775 * is to be done and execute the appropriate runtime PM function.
 776 */
 777static void pm_runtime_work(struct work_struct *work)
 778{
 779        struct device *dev = container_of(work, struct device, power.work);
 780        enum rpm_request req;
 781
 782        spin_lock_irq(&dev->power.lock);
 783
 784        if (!dev->power.request_pending)
 785                goto out;
 786
 787        req = dev->power.request;
 788        dev->power.request = RPM_REQ_NONE;
 789        dev->power.request_pending = false;
 790
 791        switch (req) {
 792        case RPM_REQ_NONE:
 793                break;
 794        case RPM_REQ_IDLE:
 795                rpm_idle(dev, RPM_NOWAIT);
 796                break;
 797        case RPM_REQ_SUSPEND:
 798                rpm_suspend(dev, RPM_NOWAIT);
 799                break;
 800        case RPM_REQ_AUTOSUSPEND:
 801                rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
 802                break;
 803        case RPM_REQ_RESUME:
 804                rpm_resume(dev, RPM_NOWAIT);
 805                break;
 806        }
 807
 808 out:
 809        spin_unlock_irq(&dev->power.lock);
 810}
 811
 812/**
 813 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 814 * @data: Device pointer passed by pm_schedule_suspend().
 815 *
 816 * Check if the time is right and queue a suspend request.
 817 */
 818static void pm_suspend_timer_fn(unsigned long data)
 819{
 820        struct device *dev = (struct device *)data;
 821        unsigned long flags;
 822        unsigned long expires;
 823
 824        spin_lock_irqsave(&dev->power.lock, flags);
 825
 826        expires = dev->power.timer_expires;
  827        /* If 'expires' is after 'jiffies' we've been called too early. */
 828        if (expires > 0 && !time_after(expires, jiffies)) {
 829                dev->power.timer_expires = 0;
 830                rpm_suspend(dev, dev->power.timer_autosuspends ?
 831                    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
 832        }
 833
 834        spin_unlock_irqrestore(&dev->power.lock, flags);
 835}
 836
 837/**
 838 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 839 * @dev: Device to suspend.
 840 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 841 */
 842int pm_schedule_suspend(struct device *dev, unsigned int delay)
 843{
 844        unsigned long flags;
 845        int retval;
 846
 847        spin_lock_irqsave(&dev->power.lock, flags);
 848
 849        if (!delay) {
 850                retval = rpm_suspend(dev, RPM_ASYNC);
 851                goto out;
 852        }
 853
 854        retval = rpm_check_suspend_allowed(dev);
 855        if (retval)
 856                goto out;
 857
 858        /* Other scheduled or pending requests need to be canceled. */
 859        pm_runtime_cancel_pending(dev);
 860
 861        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
 862        dev->power.timer_expires += !dev->power.timer_expires;
 863        dev->power.timer_autosuspends = 0;
 864        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
 865
 866 out:
 867        spin_unlock_irqrestore(&dev->power.lock, flags);
 868
 869        return retval;
 870}
 871EXPORT_SYMBOL_GPL(pm_schedule_suspend);
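
/*
 * Usage sketch (illustrative): a driver that expects the hardware to stay
 * idle for a while can queue a suspend request for later instead of
 * suspending synchronously; the delay is in milliseconds.
 * FOO_IDLE_DELAY_MS and foo->dev are hypothetical.
 *
 *	pm_schedule_suspend(foo->dev, FOO_IDLE_DELAY_MS);
 */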
 872
 873/**
 874 * __pm_runtime_idle - Entry point for runtime idle operations.
 875 * @dev: Device to send idle notification for.
 876 * @rpmflags: Flag bits.
 877 *
 878 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 879 * return immediately if it is larger than zero.  Then carry out an idle
 880 * notification, either synchronous or asynchronous.
 881 *
 882 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 883 * or if pm_runtime_irq_safe() has been called.
 884 */
 885int __pm_runtime_idle(struct device *dev, int rpmflags)
 886{
 887        unsigned long flags;
 888        int retval;
 889
 890        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
 891
 892        if (rpmflags & RPM_GET_PUT) {
 893                if (!atomic_dec_and_test(&dev->power.usage_count))
 894                        return 0;
 895        }
 896
 897        spin_lock_irqsave(&dev->power.lock, flags);
 898        retval = rpm_idle(dev, rpmflags);
 899        spin_unlock_irqrestore(&dev->power.lock, flags);
 900
 901        return retval;
 902}
 903EXPORT_SYMBOL_GPL(__pm_runtime_idle);
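
/*
 * Usage sketch (illustrative): drivers normally reach this function through
 * the wrappers in <linux/pm_runtime.h> rather than calling it directly:
 * pm_runtime_idle(dev) for a synchronous notification, pm_request_idle(dev)
 * for an asynchronous one, and pm_runtime_put(dev) for the RPM_GET_PUT form,
 * which may queue an idle notification once the usage count drops to zero.
 * foo->dev below is hypothetical.
 *
 *	pm_runtime_put(foo->dev);
 */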
 904
 905/**
 906 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 907 * @dev: Device to suspend.
 908 * @rpmflags: Flag bits.
 909 *
 910 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 911 * return immediately if it is larger than zero.  Then carry out a suspend,
 912 * either synchronous or asynchronous.
 913 *
 914 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 915 * or if pm_runtime_irq_safe() has been called.
 916 */
 917int __pm_runtime_suspend(struct device *dev, int rpmflags)
 918{
 919        unsigned long flags;
 920        int retval;
 921
 922        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
 923
 924        if (rpmflags & RPM_GET_PUT) {
 925                if (!atomic_dec_and_test(&dev->power.usage_count))
 926                        return 0;
 927        }
 928
 929        spin_lock_irqsave(&dev->power.lock, flags);
 930        retval = rpm_suspend(dev, rpmflags);
 931        spin_unlock_irqrestore(&dev->power.lock, flags);
 932
 933        return retval;
 934}
 935EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
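
/*
 * Usage sketch (illustrative): the usual entry points are the wrappers in
 * <linux/pm_runtime.h>, e.g. pm_runtime_suspend(dev) for a synchronous
 * suspend attempt or pm_runtime_put_sync_suspend(dev) to drop the usage
 * count and suspend in one call.  foo->dev and ret are hypothetical.
 *
 *	ret = pm_runtime_put_sync_suspend(foo->dev);
 *	if (ret < 0)
 *		dev_dbg(foo->dev, "runtime suspend not carried out: %d\n", ret);
 */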
 936
 937/**
 938 * __pm_runtime_resume - Entry point for runtime resume operations.
 939 * @dev: Device to resume.
 940 * @rpmflags: Flag bits.
 941 *
 942 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 943 * carry out a resume, either synchronous or asynchronous.
 944 *
 945 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 946 * or if pm_runtime_irq_safe() has been called.
 947 */
 948int __pm_runtime_resume(struct device *dev, int rpmflags)
 949{
 950        unsigned long flags;
 951        int retval;
 952
 953        might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
 954
 955        if (rpmflags & RPM_GET_PUT)
 956                atomic_inc(&dev->power.usage_count);
 957
 958        spin_lock_irqsave(&dev->power.lock, flags);
 959        retval = rpm_resume(dev, rpmflags);
 960        spin_unlock_irqrestore(&dev->power.lock, flags);
 961
 962        return retval;
 963}
 964EXPORT_SYMBOL_GPL(__pm_runtime_resume);
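
/*
 * Usage sketch (illustrative): a driver's I/O path commonly brackets
 * hardware access with the get/put wrappers, which resolve to
 * __pm_runtime_resume() and __pm_runtime_idle()/__pm_runtime_suspend().
 * foo->dev and ret are hypothetical.
 *
 *	ret = pm_runtime_get_sync(foo->dev);
 *	if (ret < 0) {
 *		pm_runtime_put_noidle(foo->dev);
 *		return ret;
 *	}
 *	...access the hardware...
 *	pm_runtime_put(foo->dev);
 */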
 965
 966/**
 967 * __pm_runtime_set_status - Set runtime PM status of a device.
 968 * @dev: Device to handle.
 969 * @status: New runtime PM status of the device.
 970 *
 971 * If runtime PM of the device is disabled or its power.runtime_error field is
 972 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 973 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 974 * However, if the device has a parent and the parent is not active, and the
 975 * parent's power.ignore_children flag is unset, the device's status cannot be
 976 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 977 *
 978 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 979 * and the device parent's counter of unsuspended children is modified to
 980 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 981 * notification request for the parent is submitted.
 982 */
 983int __pm_runtime_set_status(struct device *dev, unsigned int status)
 984{
 985        struct device *parent = dev->parent;
 986        unsigned long flags;
 987        bool notify_parent = false;
 988        int error = 0;
 989
 990        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
 991                return -EINVAL;
 992
 993        spin_lock_irqsave(&dev->power.lock, flags);
 994
 995        if (!dev->power.runtime_error && !dev->power.disable_depth) {
 996                error = -EAGAIN;
 997                goto out;
 998        }
 999
1000        if (dev->power.runtime_status == status)
1001                goto out_set;
1002
1003        if (status == RPM_SUSPENDED) {
 1004                /* It is always possible to set the status to 'suspended'. */
1005                if (parent) {
1006                        atomic_add_unless(&parent->power.child_count, -1, 0);
1007                        notify_parent = !parent->power.ignore_children;
1008                }
1009                goto out_set;
1010        }
1011
1012        if (parent) {
1013                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1014
1015                /*
1016                 * It is invalid to put an active child under a parent that is
1017                 * not active, has runtime PM enabled and the
1018                 * 'power.ignore_children' flag unset.
1019                 */
1020                if (!parent->power.disable_depth
1021                    && !parent->power.ignore_children
1022                    && parent->power.runtime_status != RPM_ACTIVE)
1023                        error = -EBUSY;
1024                else if (dev->power.runtime_status == RPM_SUSPENDED)
1025                        atomic_inc(&parent->power.child_count);
1026
1027                spin_unlock(&parent->power.lock);
1028
1029                if (error)
1030                        goto out;
1031        }
1032
1033 out_set:
1034        __update_runtime_status(dev, status);
1035        dev->power.runtime_error = 0;
1036 out:
1037        spin_unlock_irqrestore(&dev->power.lock, flags);
1038
1039        if (notify_parent)
1040                pm_request_idle(parent);
1041
1042        return error;
1043}
1044EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
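
/*
 * Usage sketch (illustrative): a probe() routine for hardware that is
 * already powered up usually records that fact with pm_runtime_set_active()
 * (a wrapper around this function) before enabling runtime PM, so the
 * core's view matches the hardware state.  pdev is a hypothetical platform
 * device being probed.
 *
 *	pm_runtime_set_active(&pdev->dev);
 *	pm_runtime_enable(&pdev->dev);
 */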
1045
1046/**
1047 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1048 * @dev: Device to handle.
1049 *
1050 * Flush all pending requests for the device from pm_wq and wait for all
1051 * runtime PM operations involving the device in progress to complete.
1052 *
1053 * Should be called under dev->power.lock with interrupts disabled.
1054 */
1055static void __pm_runtime_barrier(struct device *dev)
1056{
1057        pm_runtime_deactivate_timer(dev);
1058
1059        if (dev->power.request_pending) {
1060                dev->power.request = RPM_REQ_NONE;
1061                spin_unlock_irq(&dev->power.lock);
1062
1063                cancel_work_sync(&dev->power.work);
1064
1065                spin_lock_irq(&dev->power.lock);
1066                dev->power.request_pending = false;
1067        }
1068
1069        if (dev->power.runtime_status == RPM_SUSPENDING
1070            || dev->power.runtime_status == RPM_RESUMING
1071            || dev->power.idle_notification) {
1072                DEFINE_WAIT(wait);
1073
1074                /* Suspend, wake-up or idle notification in progress. */
1075                for (;;) {
1076                        prepare_to_wait(&dev->power.wait_queue, &wait,
1077                                        TASK_UNINTERRUPTIBLE);
1078                        if (dev->power.runtime_status != RPM_SUSPENDING
1079                            && dev->power.runtime_status != RPM_RESUMING
1080                            && !dev->power.idle_notification)
1081                                break;
1082                        spin_unlock_irq(&dev->power.lock);
1083
1084                        schedule();
1085
1086                        spin_lock_irq(&dev->power.lock);
1087                }
1088                finish_wait(&dev->power.wait_queue, &wait);
1089        }
1090}
1091
1092/**
1093 * pm_runtime_barrier - Flush pending requests and wait for completions.
1094 * @dev: Device to handle.
1095 *
1096 * Prevent the device from being suspended by incrementing its usage counter and
1097 * if there's a pending resume request for the device, wake the device up.
1098 * Next, make sure that all pending requests for the device have been flushed
1099 * from pm_wq and wait for all runtime PM operations involving the device in
1100 * progress to complete.
1101 *
1102 * Return value:
1103 * 1, if there was a resume request pending and the device had to be woken up,
1104 * 0, otherwise
1105 */
1106int pm_runtime_barrier(struct device *dev)
1107{
1108        int retval = 0;
1109
1110        pm_runtime_get_noresume(dev);
1111        spin_lock_irq(&dev->power.lock);
1112
1113        if (dev->power.request_pending
1114            && dev->power.request == RPM_REQ_RESUME) {
1115                rpm_resume(dev, 0);
1116                retval = 1;
1117        }
1118
1119        __pm_runtime_barrier(dev);
1120
1121        spin_unlock_irq(&dev->power.lock);
1122        pm_runtime_put_noidle(dev);
1123
1124        return retval;
1125}
1126EXPORT_SYMBOL_GPL(pm_runtime_barrier);
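
/*
 * Usage sketch (illustrative): code about to tear a device down can use the
 * barrier to make sure no runtime PM work queued earlier is still running
 * behind its back.  foo->dev is hypothetical; the return value only says
 * whether a pending resume request had to be carried out.
 *
 *	if (pm_runtime_barrier(foo->dev))
 *		dev_dbg(foo->dev, "resumed due to a pending request\n");
 */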
1127
1128/**
1129 * __pm_runtime_disable - Disable runtime PM of a device.
1130 * @dev: Device to handle.
1131 * @check_resume: If set, check if there's a resume request for the device.
1132 *
 1133 * Increment power.disable_depth for the device and if it was zero previously,
1134 * cancel all pending runtime PM requests for the device and wait for all
1135 * operations in progress to complete.  The device can be either active or
1136 * suspended after its runtime PM has been disabled.
1137 *
1138 * If @check_resume is set and there's a resume request pending when
1139 * __pm_runtime_disable() is called and power.disable_depth is zero, the
1140 * function will wake up the device before disabling its runtime PM.
1141 */
1142void __pm_runtime_disable(struct device *dev, bool check_resume)
1143{
1144        spin_lock_irq(&dev->power.lock);
1145
1146        if (dev->power.disable_depth > 0) {
1147                dev->power.disable_depth++;
1148                goto out;
1149        }
1150
1151        /*
1152         * Wake up the device if there's a resume request pending, because that
1153         * means there probably is some I/O to process and disabling runtime PM
1154         * shouldn't prevent the device from processing the I/O.
1155         */
1156        if (check_resume && dev->power.request_pending
1157            && dev->power.request == RPM_REQ_RESUME) {
1158                /*
1159                 * Prevent suspends and idle notifications from being carried
1160                 * out after we have woken up the device.
1161                 */
1162                pm_runtime_get_noresume(dev);
1163
1164                rpm_resume(dev, 0);
1165
1166                pm_runtime_put_noidle(dev);
1167        }
1168
1169        if (!dev->power.disable_depth++)
1170                __pm_runtime_barrier(dev);
1171
1172 out:
1173        spin_unlock_irq(&dev->power.lock);
1174}
1175EXPORT_SYMBOL_GPL(__pm_runtime_disable);
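
/*
 * Usage sketch (illustrative): drivers normally use the pm_runtime_disable()
 * wrapper (which passes check_resume = true) in their remove() path, paired
 * with the pm_runtime_enable() done in probe().  foo_remove() is
 * hypothetical.
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		pm_runtime_disable(&pdev->dev);
 *		return 0;
 *	}
 */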
1176
1177/**
1178 * pm_runtime_enable - Enable runtime PM of a device.
1179 * @dev: Device to handle.
1180 */
1181void pm_runtime_enable(struct device *dev)
1182{
1183        unsigned long flags;
1184
1185        spin_lock_irqsave(&dev->power.lock, flags);
1186
1187        if (dev->power.disable_depth > 0)
1188                dev->power.disable_depth--;
1189        else
1190                dev_warn(dev, "Unbalanced %s!\n", __func__);
1191
1192        spin_unlock_irqrestore(&dev->power.lock, flags);
1193}
1194EXPORT_SYMBOL_GPL(pm_runtime_enable);
1195
1196/**
1197 * pm_runtime_forbid - Block runtime PM of a device.
1198 * @dev: Device to handle.
1199 *
1200 * Increase the device's usage count and clear its power.runtime_auto flag,
1201 * so that it cannot be suspended at run time until pm_runtime_allow() is called
1202 * for it.
1203 */
1204void pm_runtime_forbid(struct device *dev)
1205{
1206        spin_lock_irq(&dev->power.lock);
1207        if (!dev->power.runtime_auto)
1208                goto out;
1209
1210        dev->power.runtime_auto = false;
1211        atomic_inc(&dev->power.usage_count);
1212        rpm_resume(dev, 0);
1213
1214 out:
1215        spin_unlock_irq(&dev->power.lock);
1216}
1217EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1218
1219/**
1220 * pm_runtime_allow - Unblock runtime PM of a device.
1221 * @dev: Device to handle.
1222 *
1223 * Decrease the device's usage count and set its power.runtime_auto flag.
1224 */
1225void pm_runtime_allow(struct device *dev)
1226{
1227        spin_lock_irq(&dev->power.lock);
1228        if (dev->power.runtime_auto)
1229                goto out;
1230
1231        dev->power.runtime_auto = true;
1232        if (atomic_dec_and_test(&dev->power.usage_count))
1233                rpm_idle(dev, RPM_AUTO);
1234
1235 out:
1236        spin_unlock_irq(&dev->power.lock);
1237}
1238EXPORT_SYMBOL_GPL(pm_runtime_allow);
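
/*
 * Usage sketch (illustrative): pm_runtime_forbid() and pm_runtime_allow()
 * back the device's "power/control" sysfs attribute ("on" and "auto",
 * respectively).  A driver that wants runtime suspend blocked by default
 * but still user-selectable could do the following in probe(); user space
 * may later write "auto" to power/control to allow suspends again.  pdev
 * is hypothetical.
 *
 *	pm_runtime_forbid(&pdev->dev);
 *	pm_runtime_enable(&pdev->dev);
 */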
1239
1240/**
1241 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1242 * @dev: Device to handle.
1243 *
1244 * Set the power.no_callbacks flag, which tells the PM core that this
1245 * device is power-managed through its parent and has no runtime PM
1246 * callbacks of its own.  The runtime sysfs attributes will be removed.
1247 */
1248void pm_runtime_no_callbacks(struct device *dev)
1249{
1250        spin_lock_irq(&dev->power.lock);
1251        dev->power.no_callbacks = 1;
1252        spin_unlock_irq(&dev->power.lock);
1253        if (device_is_registered(dev))
1254                rpm_sysfs_remove(dev);
1255}
1256EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
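
/*
 * Usage sketch (illustrative): this is typically called for a child device
 * that has no power control of its own, right after it is registered, so
 * that its runtime PM state simply follows the parent.  child and error
 * are hypothetical.
 *
 *	error = device_register(child);
 *	if (!error) {
 *		pm_runtime_no_callbacks(child);
 *		pm_runtime_enable(child);
 *	}
 */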
1257
1258/**
1259 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1260 * @dev: Device to handle
1261 *
1262 * Set the power.irq_safe flag, which tells the PM core that the
1263 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1264 * always be invoked with the spinlock held and interrupts disabled.  It also
1265 * causes the parent's usage counter to be permanently incremented, preventing
1266 * the parent from runtime suspending -- otherwise an irq-safe child might have
1267 * to wait for a non-irq-safe parent.
1268 */
1269void pm_runtime_irq_safe(struct device *dev)
1270{
1271        if (dev->parent)
1272                pm_runtime_get_sync(dev->parent);
1273        spin_lock_irq(&dev->power.lock);
1274        dev->power.irq_safe = 1;
1275        spin_unlock_irq(&dev->power.lock);
1276}
1277EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
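
/*
 * Usage sketch (illustrative): a driver whose runtime PM callbacks are
 * cheap and never sleep can declare that in probe(); synchronous get/put
 * calls may then be made in atomic context, at the cost of keeping the
 * parent resumed permanently.  pdev is hypothetical.
 *
 *	pm_runtime_irq_safe(&pdev->dev);
 *	pm_runtime_enable(&pdev->dev);
 */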
1278
1279/**
1280 * update_autosuspend - Handle a change to a device's autosuspend settings.
1281 * @dev: Device to handle.
1282 * @old_delay: The former autosuspend_delay value.
1283 * @old_use: The former use_autosuspend value.
1284 *
1285 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1286 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1287 *
1288 * This function must be called under dev->power.lock with interrupts disabled.
1289 */
1290static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1291{
1292        int delay = dev->power.autosuspend_delay;
1293
1294        /* Should runtime suspend be prevented now? */
1295        if (dev->power.use_autosuspend && delay < 0) {
1296
1297                /* If it used to be allowed then prevent it. */
1298                if (!old_use || old_delay >= 0) {
1299                        atomic_inc(&dev->power.usage_count);
1300                        rpm_resume(dev, 0);
1301                }
1302        }
1303
1304        /* Runtime suspend should be allowed now. */
1305        else {
1306
1307                /* If it used to be prevented then allow it. */
1308                if (old_use && old_delay < 0)
1309                        atomic_dec(&dev->power.usage_count);
1310
1311                /* Maybe we can autosuspend now. */
1312                rpm_idle(dev, RPM_AUTO);
1313        }
1314}
1315
1316/**
1317 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1318 * @dev: Device to handle.
1319 * @delay: Value of the new delay in milliseconds.
1320 *
1321 * Set the device's power.autosuspend_delay value.  If it changes to negative
1322 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1323 * changes the other way, allow runtime suspends.
1324 */
1325void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1326{
1327        int old_delay, old_use;
1328
1329        spin_lock_irq(&dev->power.lock);
1330        old_delay = dev->power.autosuspend_delay;
1331        old_use = dev->power.use_autosuspend;
1332        dev->power.autosuspend_delay = delay;
1333        update_autosuspend(dev, old_delay, old_use);
1334        spin_unlock_irq(&dev->power.lock);
1335}
1336EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
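
/*
 * Usage sketch (illustrative): autosuspend setup in a probe() routine
 * usually combines the delay with use_autosuspend before runtime PM is
 * enabled; the 2000 ms value is only an example.  pdev is hypothetical.
 *
 *	pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
 *	pm_runtime_use_autosuspend(&pdev->dev);
 *	pm_runtime_set_active(&pdev->dev);
 *	pm_runtime_enable(&pdev->dev);
 */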
1337
1338/**
1339 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1340 * @dev: Device to handle.
1341 * @use: New value for use_autosuspend.
1342 *
1343 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1344 * suspends as needed.
1345 */
1346void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1347{
1348        int old_delay, old_use;
1349
1350        spin_lock_irq(&dev->power.lock);
1351        old_delay = dev->power.autosuspend_delay;
1352        old_use = dev->power.use_autosuspend;
1353        dev->power.use_autosuspend = use;
1354        update_autosuspend(dev, old_delay, old_use);
1355        spin_unlock_irq(&dev->power.lock);
1356}
1357EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1358
1359/**
1360 * pm_runtime_init - Initialize runtime PM fields in given device object.
1361 * @dev: Device object to initialize.
1362 */
1363void pm_runtime_init(struct device *dev)
1364{
1365        dev->power.runtime_status = RPM_SUSPENDED;
1366        dev->power.idle_notification = false;
1367
1368        dev->power.disable_depth = 1;
1369        atomic_set(&dev->power.usage_count, 0);
1370
1371        dev->power.runtime_error = 0;
1372
1373        atomic_set(&dev->power.child_count, 0);
1374        pm_suspend_ignore_children(dev, false);
1375        dev->power.runtime_auto = true;
1376
1377        dev->power.request_pending = false;
1378        dev->power.request = RPM_REQ_NONE;
1379        dev->power.deferred_resume = false;
1380        dev->power.accounting_timestamp = jiffies;
1381        INIT_WORK(&dev->power.work, pm_runtime_work);
1382
1383        dev->power.timer_expires = 0;
1384        setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
1385                        (unsigned long)dev);
1386
1387        init_waitqueue_head(&dev->power.wait_queue);
1388}
1389
1390/**
1391 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1392 * @dev: Device object being removed from device hierarchy.
1393 */
1394void pm_runtime_remove(struct device *dev)
1395{
1396        __pm_runtime_disable(dev, false);
1397
1398        /* Change the status back to 'suspended' to match the initial status. */
1399        if (dev->power.runtime_status == RPM_ACTIVE)
1400                pm_runtime_set_suspended(dev);
1401        if (dev->power.irq_safe && dev->parent)
1402                pm_runtime_put(dev->parent);
1403}
1404