/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
        unsigned long now = jiffies;
        int delta;

        delta = now - dev->power.accounting_timestamp;

        if (delta < 0)
                delta = 0;

        dev->power.accounting_timestamp = now;

        if (dev->power.disable_depth > 0)
                return;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                dev->power.suspended_jiffies += delta;
        else
                dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
        update_pm_runtime_accounting(dev);
        dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                del_timer(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
        int autosuspend_delay;
        long elapsed;
        unsigned long last_busy;
        unsigned long expires = 0;

        if (!dev->power.use_autosuspend)
                goto out;

        autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
        if (autosuspend_delay < 0)
                goto out;

        last_busy = ACCESS_ONCE(dev->power.last_busy);
        elapsed = jiffies - last_busy;
        if (elapsed < 0)
                goto out;       /* jiffies has wrapped around. */

        /*
         * If the autosuspend_delay is >= 1 second, align the timer by rounding
         * up to the nearest second.
         */
        expires = last_busy + msecs_to_jiffies(autosuspend_delay);
        if (autosuspend_delay >= 1000)
                expires = round_jiffies(expires);
        expires += !expires;
        if (elapsed >= expires - last_busy)
                expires = 0;    /* Already expired. */

 out:
        return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
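
/*
 * Illustrative sketch, not part of this file: power.last_busy, which
 * pm_runtime_autosuspend_expiration() reads above, is normally updated by a
 * driver when a request finishes.  The foo_request_done() name below is
 * hypothetical; pm_runtime_mark_last_busy() and pm_runtime_put_autosuspend()
 * are the real helpers declared in <linux/pm_runtime.h>.
 */
#if 0   /* example only */
static void foo_request_done(struct device *dev)
{
        /* Restart the autosuspend delay from this moment of activity. */
        pm_runtime_mark_last_busy(dev);
        /* Drop our reference; the suspend runs once the delay expires. */
        pm_runtime_put_autosuspend(dev);
}
#endif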

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (atomic_read(&dev->power.usage_count) > 0
            || dev->power.disable_depth > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;

        /* Pending resume requests take precedence over suspends. */
        else if ((dev->power.deferred_resume
                        && dev->power.runtime_status == RPM_SUSPENDING)
            || (dev->power.request_pending
                        && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;
        else if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;

        return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
        int (*callback)(struct device *);
        int retval;

        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Idle notifications are allowed only in the RPM_ACTIVE state. */
        else if (dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;

        /*
         * Any pending request other than an idle notification takes
         * precedence over us, except that the timer may be running.
         */
        else if (dev->power.request_pending &&
            dev->power.request > RPM_REQ_IDLE)
                retval = -EAGAIN;

        /* Act as though RPM_NOWAIT is always set. */
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;
        if (retval)
                goto out;

        /* Pending requests need to be canceled. */
        dev->power.request = RPM_REQ_NONE;

        if (dev->power.no_callbacks) {
                /* Assume ->runtime_idle() callback would have suspended. */
                retval = rpm_suspend(dev, rpmflags);
                goto out;
        }

        /* Carry out an asynchronous or a synchronous idle notification. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_IDLE;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        dev->power.idle_notification = true;

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle)
                callback = dev->bus->pm->runtime_idle;
        else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle)
                callback = dev->type->pm->runtime_idle;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_idle;
        else
                callback = NULL;

        if (callback) {
                spin_unlock_irq(&dev->power.lock);

                callback(dev);

                spin_lock_irq(&dev->power.lock);
        }

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

 out:
        return retval;
}
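
/*
 * Illustrative sketch, not part of this file: rpm_idle() above invokes the
 * ->runtime_idle() callback it finds on the bus type, device type or class.
 * A minimal subsystem callback often just tries to suspend the device.  The
 * foo_* names are hypothetical; SET_RUNTIME_PM_OPS() is the real macro from
 * <linux/pm.h>.
 */
#if 0   /* example only */
static int foo_runtime_idle(struct device *dev)
{
        /* The device looks unused; ask the core to suspend it right away. */
        return pm_runtime_suspend(dev);
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_RUNTIME_PM_OPS(NULL, NULL, foo_runtime_idle)
};
#endif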

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int retval;

        if (!cb)
                return -ENOSYS;

        if (dev->power.irq_safe) {
                retval = cb(dev);
        } else {
                spin_unlock_irq(&dev->power.lock);

                retval = cb(dev);

                spin_lock_irq(&dev->power.lock);
        }
        dev->power.runtime_error = retval;
        return retval;
}

/**
 * rpm_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
 * pending idle notification.  If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval;

        dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
        retval = rpm_check_suspend_allowed(dev);

        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
        else if (dev->power.runtime_status == RPM_RESUMING &&
            !(rpmflags & RPM_ASYNC))
                retval = -EAGAIN;
        if (retval)
                goto out;

        /* If the autosuspend_delay time hasn't expired yet, reschedule. */
        if ((rpmflags & RPM_AUTO)
            && dev->power.runtime_status != RPM_SUSPENDING) {
                unsigned long expires = pm_runtime_autosuspend_expiration(dev);

                if (expires != 0) {
                        /* Pending requests need to be canceled. */
                        dev->power.request = RPM_REQ_NONE;

                        /*
                         * Optimization: If the timer is already running and is
                         * set to expire at or before the autosuspend delay,
                         * avoid the overhead of resetting it.  Just let it
                         * expire; pm_suspend_timer_fn() will take care of the
                         * rest.
                         */
                        if (!(dev->power.timer_expires && time_before_eq(
                            dev->power.timer_expires, expires))) {
                                dev->power.timer_expires = expires;
                                mod_timer(&dev->power.suspend_timer, expires);
                        }
                        dev->power.timer_autosuspends = 1;
                        goto out;
                }
        }

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        dev->power.deferred_resume = false;
        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        /* Carry out an asynchronous or a synchronous suspend. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = (rpmflags & RPM_AUTO) ?
                    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        __update_runtime_status(dev, RPM_SUSPENDING);

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
                callback = dev->bus->pm->runtime_suspend;
        else if (dev->type && dev->type->pm && dev->type->pm->runtime_suspend)
                callback = dev->type->pm->runtime_suspend;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_suspend;
        else
                callback = NULL;

        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_ACTIVE);
                dev->power.deferred_resume = false;
                if (retval == -EAGAIN || retval == -EBUSY)
                        dev->power.runtime_error = 0;
                else
                        pm_runtime_cancel_pending(dev);
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_deactivate_timer(dev);

                if (dev->parent) {
                        parent = dev->parent;
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                }
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
        }

        /* Maybe the parent is now able to suspend. */
        if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);

                spin_lock(&parent->power.lock);
                rpm_idle(parent, RPM_ASYNC);
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
        }

 out:
        dev_dbg(dev, "%s returns %d\n", __func__, retval);

        return retval;
}
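
/*
 * Illustrative sketch, not part of this file: as the error path of
 * rpm_suspend() shows, a ->runtime_suspend() callback may return -EBUSY or
 * -EAGAIN to veto a suspend without leaving power.runtime_error set, so a
 * later attempt can still succeed.  All foo_* names are hypothetical.
 */
#if 0   /* example only */
static int foo_runtime_suspend(struct device *dev)
{
        struct foo_priv *priv = dev_get_drvdata(dev);

        if (foo_hw_busy(priv))
                return -EBUSY;  /* Not an error; retried later. */

        foo_hw_power_down(priv);
        return 0;
}
#endif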

/**
 * rpm_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on
 * the RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running
 * in parallel with this function, either tell the other process to resume
 * after suspending (deferred_resume) or wait for it to finish.  If the
 * RPM_ASYNC flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int (*callback)(struct device *);
        struct device *parent = NULL;
        int retval = 0;

        dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EAGAIN;
        if (retval)
                goto out;

        /*
         * Other scheduled or pending requests need to be canceled.  Small
         * optimization: If an autosuspend timer is running, leave it running
         * rather than cancelling it now only to restart it again in the near
         * future.
         */
        dev->power.request = RPM_REQ_NONE;
        if (!dev->power.timer_autosuspends)
                pm_runtime_deactivate_timer(dev);

        if (dev->power.runtime_status == RPM_ACTIVE) {
                retval = 1;
                goto out;
        }

        if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        if (dev->power.runtime_status == RPM_SUSPENDING)
                                dev->power.deferred_resume = true;
                        else
                                retval = -EINPROGRESS;
                        goto out;
                }

                /* Wait for the operation carried out in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_RESUMING
                            && dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        /*
         * See if we can skip waking up the parent.  This is safe only if
         * power.no_callbacks is set, because otherwise we don't know whether
         * the resume will actually succeed.
         */
        if (dev->power.no_callbacks && !parent && dev->parent) {
                spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
                if (dev->parent->power.disable_depth > 0
                    || dev->parent->power.ignore_children
                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
                        atomic_inc(&dev->parent->power.child_count);
                        spin_unlock(&dev->parent->power.lock);
                        goto no_callback;       /* Assume success. */
                }
                spin_unlock(&dev->parent->power.lock);
        }

        /* Carry out an asynchronous or a synchronous resume. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_RESUME;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                retval = 0;
                goto out;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's usage counter and resume it if
                 * necessary.  Not needed if dev is irq-safe; then the
                 * parent is permanently resumed.
                 */
                parent = dev->parent;
                if (dev->power.irq_safe)
                        goto skip_parent;
                spin_unlock(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock(&parent->power.lock);
                /*
                 * Resume the parent first, unless its run-time PM is disabled
                 * or it is set to ignore children, in which case this device
                 * can be resumed without it.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children) {
                        rpm_resume(parent, 0);
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
                if (retval)
                        goto out;
                goto repeat;
        }
 skip_parent:

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        __update_runtime_status(dev, RPM_RESUMING);

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
                callback = dev->bus->pm->runtime_resume;
        else if (dev->type && dev->type->pm && dev->type->pm->runtime_resume)
                callback = dev->type->pm->runtime_resume;
        else if (dev->class && dev->class->pm)
                callback = dev->class->pm->runtime_resume;
        else
                callback = NULL;

        retval = rpm_callback(callback, dev);
        if (retval) {
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_cancel_pending(dev);
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_ACTIVE);
                if (parent)
                        atomic_inc(&parent->power.child_count);
        }
        wake_up_all(&dev->power.wait_queue);

        if (!retval)
                rpm_idle(dev, RPM_ASYNC);

 out:
        if (parent && !dev->power.irq_safe) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        dev_dbg(dev, "%s returns %d\n", __func__, retval);

        return retval;
}
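
/*
 * Illustrative sketch, not part of this file: the usual way a driver enters
 * rpm_resume() synchronously is the get/put pattern around I/O, via the
 * pm_runtime_get_sync()/pm_runtime_put() wrappers from <linux/pm_runtime.h>.
 * The foo_* names are hypothetical.
 */
#if 0   /* example only */
static int foo_do_io(struct device *dev)
{
        int error;

        /* Bump the usage count and resume the device before touching it. */
        error = pm_runtime_get_sync(dev);
        if (error < 0) {
                pm_runtime_put_noidle(dev);
                return error;
        }

        error = foo_start_transfer(dev);

        /* Drop the reference; an asynchronous idle notification may follow. */
        pm_runtime_put(dev);
        return error;
}
#endif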

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
        struct device *dev = container_of(work, struct device, power.work);
        enum rpm_request req;

        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

        switch (req) {
        case RPM_REQ_NONE:
                break;
        case RPM_REQ_IDLE:
                rpm_idle(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_SUSPEND:
                rpm_suspend(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_AUTOSUSPEND:
                rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
                break;
        case RPM_REQ_RESUME:
                rpm_resume(dev, RPM_NOWAIT);
                break;
        }

 out:
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
        struct device *dev = (struct device *)data;
        unsigned long flags;
        unsigned long expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /* If 'expires' is after 'jiffies' we've been called too early. */
        if (expires > 0 && !time_after(expires, jiffies)) {
                dev->power.timer_expires = 0;
                rpm_suspend(dev, dev->power.timer_autosuspends ?
                    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!delay) {
                retval = rpm_suspend(dev, RPM_ASYNC);
                goto out;
        }

        retval = rpm_check_suspend_allowed(dev);
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
        dev->power.timer_expires += !dev->power.timer_expires;
        dev->power.timer_autosuspends = 0;
        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
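
/*
 * Illustrative sketch, not part of this file: a driver that knows its device
 * will stay idle for a while can schedule the suspend itself rather than use
 * autosuspend.  The foo_close() name is hypothetical.
 */
#if 0   /* example only */
static void foo_close(struct device *dev)
{
        /* Queue a suspend request to be submitted about 500 ms from now. */
        pm_schedule_suspend(dev, 500);
}
#endif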

/**
 * __pm_runtime_idle - Entry point for run-time idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for run-time resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_resume(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error field
 * and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
        struct device *parent = dev->parent;
        unsigned long flags;
        bool notify_parent = false;
        int error = 0;

        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                return -EINVAL;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!dev->power.runtime_error && !dev->power.disable_depth) {
                error = -EAGAIN;
                goto out;
        }

        if (dev->power.runtime_status == status)
                goto out_set;

        if (status == RPM_SUSPENDED) {
                /* It is always possible to set the status to 'suspended'. */
                if (parent) {
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                        notify_parent = !parent->power.ignore_children;
                }
                goto out_set;
        }

        if (parent) {
                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

                /*
                 * It is invalid to put an active child under a parent that is
                 * not active, has run-time PM enabled and the
                 * 'power.ignore_children' flag unset.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children
                    && parent->power.runtime_status != RPM_ACTIVE)
                        error = -EBUSY;
                else if (dev->power.runtime_status == RPM_SUSPENDED)
                        atomic_inc(&parent->power.child_count);

                spin_unlock(&parent->power.lock);

                if (error)
                        goto out;
        }

 out_set:
        __update_runtime_status(dev, status);
        dev->power.runtime_error = 0;
 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (notify_parent)
                pm_request_idle(parent);

        return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING
            || dev->power.runtime_status == RPM_RESUMING
            || dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter and
 * if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
        int retval = 0;

        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                rpm_resume(dev, 0);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
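
/*
 * Illustrative sketch, not part of this file: pm_runtime_barrier() is
 * typically called before tearing down resources that the run-time PM
 * callbacks still depend on.  The foo_* names are hypothetical; most drivers
 * use pm_runtime_disable(), which performs the same wait internally via
 * __pm_runtime_disable() below.
 */
#if 0   /* example only */
static void foo_unbind(struct device *dev)
{
        /* Cancel queued requests and wait out any callback in flight. */
        pm_runtime_barrier(dev);
        foo_release_resources(dev);
}
#endif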

/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because that
         * means there probably is some I/O to process and disabling run-time PM
         * shouldn't prevent the device from processing the I/O.
         */
        if (check_resume && dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                /*
                 * Prevent suspends and idle notifications from being carried
                 * out after we have woken up the device.
                 */
                pm_runtime_get_noresume(dev);

                rpm_resume(dev, 0);

                pm_runtime_put_noidle(dev);
        }

        if (!dev->power.disable_depth++)
                __pm_runtime_barrier(dev);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.disable_depth > 0)
                dev->power.disable_depth--;
        else
                dev_warn(dev, "Unbalanced %s!\n", __func__);

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is called
 * for it.
 */
void pm_runtime_forbid(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (!dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = false;
        atomic_inc(&dev->power.usage_count);
        rpm_resume(dev, 0);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = true;
        if (atomic_dec_and_test(&dev->power.usage_count))
                rpm_idle(dev, RPM_AUTO);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no run-time PM
 * callbacks of its own.  The run-time sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_callbacks = 1;
        spin_unlock_irq(&dev->power.lock);
        if (device_is_registered(dev))
                rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
        if (dev->parent)
                pm_runtime_get_sync(dev->parent);
        spin_lock_irq(&dev->power.lock);
        dev->power.irq_safe = 1;
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
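
/*
 * Illustrative sketch, not part of this file: a driver whose runtime
 * callbacks are fast and never sleep can declare that at probe time, after
 * which pm_runtime_get_sync() and friends become usable from atomic context.
 * The foo_probe() name is hypothetical.
 */
#if 0   /* example only */
static int foo_probe(struct platform_device *pdev)
{
        /* Callbacks will run with dev->power.lock held and IRQs disabled. */
        pm_runtime_irq_safe(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
        return 0;
}
#endif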

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
        int delay = dev->power.autosuspend_delay;

        /* Should runtime suspend be prevented now? */
        if (dev->power.use_autosuspend && delay < 0) {

                /* If it used to be allowed then prevent it. */
                if (!old_use || old_delay >= 0) {
                        atomic_inc(&dev->power.usage_count);
                        rpm_resume(dev, 0);
                }
        }

        /* Runtime suspend should be allowed now. */
        else {

                /* If it used to be prevented then allow it. */
                if (old_use && old_delay < 0)
                        atomic_dec(&dev->power.usage_count);

                /* Maybe we can autosuspend now. */
                rpm_idle(dev, RPM_AUTO);
        }
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent run-time suspends.  If it
 * changes the other way, allow run-time suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.autosuspend_delay = delay;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent run-time
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
        int old_delay, old_use;

        spin_lock_irq(&dev->power.lock);
        old_delay = dev->power.autosuspend_delay;
        old_use = dev->power.use_autosuspend;
        dev->power.use_autosuspend = use;
        update_autosuspend(dev, old_delay, old_use);
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
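
/*
 * Illustrative sketch, not part of this file: the usual probe-time sequence
 * for enabling autosuspend with a two-second delay.  The foo_setup_pm() name
 * is hypothetical; pm_runtime_use_autosuspend() is the real inline wrapper
 * around __pm_runtime_use_autosuspend(dev, true).
 */
#if 0   /* example only */
static void foo_setup_pm(struct device *dev)
{
        pm_runtime_set_autosuspend_delay(dev, 2000);    /* milliseconds */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);
}
#endif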

/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);
        dev->power.runtime_auto = true;

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        dev->power.accounting_timestamp = jiffies;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
                        (unsigned long)dev);

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);

        /* Change the status back to 'suspended' to match the initial status. */
        if (dev->power.runtime_status == RPM_ACTIVE)
                pm_runtime_set_suspended(dev);
        if (dev->power.irq_safe && dev->parent)
                pm_runtime_put_sync(dev->parent);
}