linux/drivers/base/power/main.c
<<
>>
Prefs
   1/*
   2 * drivers/base/power/main.c - Where the driver meets power management.
   3 *
   4 * Copyright (c) 2003 Patrick Mochel
   5 * Copyright (c) 2003 Open Source Development Lab
   6 *
   7 * This file is released under the GPLv2
   8 *
   9 *
  10 * The driver model core calls device_pm_add() when a device is registered.
  11 * This will initialize the embedded device_pm_info object in the device
  12 * and add it to the list of power-controlled devices. sysfs entries for
  13 * controlling device power management will also be added.
  14 *
  15 * A separate list is used for keeping track of power info, because the power
  16 * domain dependencies may differ from the ancestral dependencies that the
  17 * subsystem list maintains.
  18 */
  19
  20#include <linux/device.h>
  21#include <linux/kallsyms.h>
  22#include <linux/export.h>
  23#include <linux/mutex.h>
  24#include <linux/pm.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/pm-trace.h>
  27#include <linux/pm_wakeirq.h>
  28#include <linux/interrupt.h>
  29#include <linux/sched.h>
  30#include <linux/sched/debug.h>
  31#include <linux/async.h>
  32#include <linux/suspend.h>
  33#include <trace/events/power.h>
  34#include <linux/cpufreq.h>
  35#include <linux/cpuidle.h>
  36#include <linux/timer.h>
  37
  38#include "../base.h"
  39#include "power.h"
  40
  41typedef int (*pm_callback_t)(struct device *);
  42
  43/*
  44 * The entries in the dpm_list list are in a depth first order, simply
  45 * because children are guaranteed to be discovered after parents, and
  46 * are inserted at the back of the list on discovery.
  47 *
  48 * Since device_pm_add() may be called with a device lock held,
  49 * we must never try to acquire a device lock while holding
  50 * dpm_list_mutex.
  51 */
  52
  53LIST_HEAD(dpm_list);
  54static LIST_HEAD(dpm_prepared_list);
  55static LIST_HEAD(dpm_suspended_list);
  56static LIST_HEAD(dpm_late_early_list);
  57static LIST_HEAD(dpm_noirq_list);
  58
  59struct suspend_stats suspend_stats;
  60static DEFINE_MUTEX(dpm_list_mtx);
  61static pm_message_t pm_transition;
  62
  63static int async_error;
  64
  65static const char *pm_verb(int event)
  66{
  67        switch (event) {
  68        case PM_EVENT_SUSPEND:
  69                return "suspend";
  70        case PM_EVENT_RESUME:
  71                return "resume";
  72        case PM_EVENT_FREEZE:
  73                return "freeze";
  74        case PM_EVENT_QUIESCE:
  75                return "quiesce";
  76        case PM_EVENT_HIBERNATE:
  77                return "hibernate";
  78        case PM_EVENT_THAW:
  79                return "thaw";
  80        case PM_EVENT_RESTORE:
  81                return "restore";
  82        case PM_EVENT_RECOVER:
  83                return "recover";
  84        default:
  85                return "(unknown PM event)";
  86        }
  87}
  88
  89/**
  90 * device_pm_sleep_init - Initialize system suspend-related device fields.
  91 * @dev: Device object being initialized.
  92 */
  93void device_pm_sleep_init(struct device *dev)
  94{
  95        dev->power.is_prepared = false;
  96        dev->power.is_suspended = false;
  97        dev->power.is_noirq_suspended = false;
  98        dev->power.is_late_suspended = false;
  99        init_completion(&dev->power.completion);
 100        complete_all(&dev->power.completion);
 101        dev->power.wakeup = NULL;
 102        INIT_LIST_HEAD(&dev->power.entry);
 103}
 104
 105/**
 106 * device_pm_lock - Lock the list of active devices used by the PM core.
 107 */
 108void device_pm_lock(void)
 109{
 110        mutex_lock(&dpm_list_mtx);
 111}
 112
 113/**
 114 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 115 */
 116void device_pm_unlock(void)
 117{
 118        mutex_unlock(&dpm_list_mtx);
 119}
 120
 121/**
 122 * device_pm_add - Add a device to the PM core's list of active devices.
 123 * @dev: Device to add to the list.
 124 */
 125void device_pm_add(struct device *dev)
 126{
 127        pr_debug("PM: Adding info for %s:%s\n",
 128                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 129        device_pm_check_callbacks(dev);
 130        mutex_lock(&dpm_list_mtx);
 131        if (dev->parent && dev->parent->power.is_prepared)
 132                dev_warn(dev, "parent %s should not be sleeping\n",
 133                        dev_name(dev->parent));
 134        list_add_tail(&dev->power.entry, &dpm_list);
 135        dev->power.in_dpm_list = true;
 136        mutex_unlock(&dpm_list_mtx);
 137}
 138
 139/**
 140 * device_pm_remove - Remove a device from the PM core's list of active devices.
 141 * @dev: Device to be removed from the list.
 142 */
 143void device_pm_remove(struct device *dev)
 144{
 145        pr_debug("PM: Removing info for %s:%s\n",
 146                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 147        complete_all(&dev->power.completion);
 148        mutex_lock(&dpm_list_mtx);
 149        list_del_init(&dev->power.entry);
 150        dev->power.in_dpm_list = false;
 151        mutex_unlock(&dpm_list_mtx);
 152        device_wakeup_disable(dev);
 153        pm_runtime_remove(dev);
 154        device_pm_check_callbacks(dev);
 155}
 156
 157/**
 158 * device_pm_move_before - Move device in the PM core's list of active devices.
 159 * @deva: Device to move in dpm_list.
 160 * @devb: Device @deva should come before.
 161 */
 162void device_pm_move_before(struct device *deva, struct device *devb)
 163{
 164        pr_debug("PM: Moving %s:%s before %s:%s\n",
 165                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 166                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 167        /* Delete deva from dpm_list and reinsert before devb. */
 168        list_move_tail(&deva->power.entry, &devb->power.entry);
 169}
 170
 171/**
 172 * device_pm_move_after - Move device in the PM core's list of active devices.
 173 * @deva: Device to move in dpm_list.
 174 * @devb: Device @deva should come after.
 175 */
 176void device_pm_move_after(struct device *deva, struct device *devb)
 177{
 178        pr_debug("PM: Moving %s:%s after %s:%s\n",
 179                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 180                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 181        /* Delete deva from dpm_list and reinsert after devb. */
 182        list_move(&deva->power.entry, &devb->power.entry);
 183}
 184
 185/**
 186 * device_pm_move_last - Move device to end of the PM core's list of devices.
 187 * @dev: Device to move in dpm_list.
 188 */
 189void device_pm_move_last(struct device *dev)
 190{
 191        pr_debug("PM: Moving %s:%s to end of list\n",
 192                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 193        list_move_tail(&dev->power.entry, &dpm_list);
 194}
 195
 196static ktime_t initcall_debug_start(struct device *dev)
 197{
 198        ktime_t calltime = 0;
 199
 200        if (pm_print_times_enabled) {
 201                pr_info("calling  %s+ @ %i, parent: %s\n",
 202                        dev_name(dev), task_pid_nr(current),
 203                        dev->parent ? dev_name(dev->parent) : "none");
 204                calltime = ktime_get();
 205        }
 206
 207        return calltime;
 208}
 209
 210static void initcall_debug_report(struct device *dev, ktime_t calltime,
 211                                  int error, pm_message_t state,
 212                                  const char *info)
 213{
 214        ktime_t rettime;
 215        s64 nsecs;
 216
 217        rettime = ktime_get();
 218        nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
 219
 220        if (pm_print_times_enabled) {
 221                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
 222                        error, (unsigned long long)nsecs >> 10);
 223        }
 224}
 225
 226/**
 227 * dpm_wait - Wait for a PM operation to complete.
 228 * @dev: Device to wait for.
 229 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 230 */
 231static void dpm_wait(struct device *dev, bool async)
 232{
 233        if (!dev)
 234                return;
 235
 236        if (async || (pm_async_enabled && dev->power.async_suspend))
 237                wait_for_completion(&dev->power.completion);
 238}
 239
 240static int dpm_wait_fn(struct device *dev, void *async_ptr)
 241{
 242        dpm_wait(dev, *((bool *)async_ptr));
 243        return 0;
 244}
 245
 246static void dpm_wait_for_children(struct device *dev, bool async)
 247{
 248       device_for_each_child(dev, &async, dpm_wait_fn);
 249}
 250
 251static void dpm_wait_for_suppliers(struct device *dev, bool async)
 252{
 253        struct device_link *link;
 254        int idx;
 255
 256        idx = device_links_read_lock();
 257
 258        /*
 259         * If the supplier goes away right after we've checked the link to it,
 260         * we'll wait for its completion to change the state, but that's fine,
 261         * because the only things that will block as a result are the SRCU
 262         * callbacks freeing the link objects for the links in the list we're
 263         * walking.
 264         */
 265        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
 266                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 267                        dpm_wait(link->supplier, async);
 268
 269        device_links_read_unlock(idx);
 270}
 271
 272static void dpm_wait_for_superior(struct device *dev, bool async)
 273{
 274        dpm_wait(dev->parent, async);
 275        dpm_wait_for_suppliers(dev, async);
 276}
 277
 278static void dpm_wait_for_consumers(struct device *dev, bool async)
 279{
 280        struct device_link *link;
 281        int idx;
 282
 283        idx = device_links_read_lock();
 284
 285        /*
 286         * The status of a device link can only be changed from "dormant" by a
 287         * probe, but that cannot happen during system suspend/resume.  In
 288         * theory it can change to "dormant" at that time, but then it is
 289         * reasonable to wait for the target device anyway (eg. if it goes
 290         * away, it's better to wait for it to go away completely and then
 291         * continue instead of trying to continue in parallel with its
 292         * unregistration).
 293         */
 294        list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
 295                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 296                        dpm_wait(link->consumer, async);
 297
 298        device_links_read_unlock(idx);
 299}
 300
 301static void dpm_wait_for_subordinate(struct device *dev, bool async)
 302{
 303        dpm_wait_for_children(dev, async);
 304        dpm_wait_for_consumers(dev, async);
 305}
 306
 307/**
 308 * pm_op - Return the PM operation appropriate for given PM event.
 309 * @ops: PM operations to choose from.
 310 * @state: PM transition of the system being carried out.
 311 */
 312static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 313{
 314        switch (state.event) {
 315#ifdef CONFIG_SUSPEND
 316        case PM_EVENT_SUSPEND:
 317                return ops->suspend;
 318        case PM_EVENT_RESUME:
 319                return ops->resume;
 320#endif /* CONFIG_SUSPEND */
 321#ifdef CONFIG_HIBERNATE_CALLBACKS
 322        case PM_EVENT_FREEZE:
 323        case PM_EVENT_QUIESCE:
 324                return ops->freeze;
 325        case PM_EVENT_HIBERNATE:
 326                return ops->poweroff;
 327        case PM_EVENT_THAW:
 328        case PM_EVENT_RECOVER:
 329                return ops->thaw;
 330                break;
 331        case PM_EVENT_RESTORE:
 332                return ops->restore;
 333#endif /* CONFIG_HIBERNATE_CALLBACKS */
 334        }
 335
 336        return NULL;
 337}
 338
 339/**
 340 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 341 * @ops: PM operations to choose from.
 342 * @state: PM transition of the system being carried out.
 343 *
 344 * Runtime PM is disabled for @dev while this function is being executed.
 345 */
 346static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
 347                                      pm_message_t state)
 348{
 349        switch (state.event) {
 350#ifdef CONFIG_SUSPEND
 351        case PM_EVENT_SUSPEND:
 352                return ops->suspend_late;
 353        case PM_EVENT_RESUME:
 354                return ops->resume_early;
 355#endif /* CONFIG_SUSPEND */
 356#ifdef CONFIG_HIBERNATE_CALLBACKS
 357        case PM_EVENT_FREEZE:
 358        case PM_EVENT_QUIESCE:
 359                return ops->freeze_late;
 360        case PM_EVENT_HIBERNATE:
 361                return ops->poweroff_late;
 362        case PM_EVENT_THAW:
 363        case PM_EVENT_RECOVER:
 364                return ops->thaw_early;
 365        case PM_EVENT_RESTORE:
 366                return ops->restore_early;
 367#endif /* CONFIG_HIBERNATE_CALLBACKS */
 368        }
 369
 370        return NULL;
 371}
 372
 373/**
 374 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 375 * @ops: PM operations to choose from.
 376 * @state: PM transition of the system being carried out.
 377 *
 378 * The driver of @dev will not receive interrupts while this function is being
 379 * executed.
 380 */
 381static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 382{
 383        switch (state.event) {
 384#ifdef CONFIG_SUSPEND
 385        case PM_EVENT_SUSPEND:
 386                return ops->suspend_noirq;
 387        case PM_EVENT_RESUME:
 388                return ops->resume_noirq;
 389#endif /* CONFIG_SUSPEND */
 390#ifdef CONFIG_HIBERNATE_CALLBACKS
 391        case PM_EVENT_FREEZE:
 392        case PM_EVENT_QUIESCE:
 393                return ops->freeze_noirq;
 394        case PM_EVENT_HIBERNATE:
 395                return ops->poweroff_noirq;
 396        case PM_EVENT_THAW:
 397        case PM_EVENT_RECOVER:
 398                return ops->thaw_noirq;
 399        case PM_EVENT_RESTORE:
 400                return ops->restore_noirq;
 401#endif /* CONFIG_HIBERNATE_CALLBACKS */
 402        }
 403
 404        return NULL;
 405}
 406
 407static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
 408{
 409        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
 410                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
 411                ", may wakeup" : "");
 412}
 413
 414static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
 415                        int error)
 416{
 417        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
 418                dev_name(dev), pm_verb(state.event), info, error);
 419}
 420
 421static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
 422                          const char *info)
 423{
 424        ktime_t calltime;
 425        u64 usecs64;
 426        int usecs;
 427
 428        calltime = ktime_get();
 429        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
 430        do_div(usecs64, NSEC_PER_USEC);
 431        usecs = usecs64;
 432        if (usecs == 0)
 433                usecs = 1;
 434
 435        pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
 436                  info ?: "", info ? " " : "", pm_verb(state.event),
 437                  error ? "aborted" : "complete",
 438                  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 439}
 440
 441static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 442                            pm_message_t state, const char *info)
 443{
 444        ktime_t calltime;
 445        int error;
 446
 447        if (!cb)
 448                return 0;
 449
 450        calltime = initcall_debug_start(dev);
 451
 452        pm_dev_dbg(dev, state, info);
 453        trace_device_pm_callback_start(dev, info, state.event);
 454        error = cb(dev);
 455        trace_device_pm_callback_end(dev, error);
 456        suspend_report_result(cb, error);
 457
 458        initcall_debug_report(dev, calltime, error, state, info);
 459
 460        return error;
 461}
 462
 463#ifdef CONFIG_DPM_WATCHDOG
 464struct dpm_watchdog {
 465        struct device           *dev;
 466        struct task_struct      *tsk;
 467        struct timer_list       timer;
 468};
 469
 470#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
 471        struct dpm_watchdog wd
 472
 473/**
 474 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 475 * @data: Watchdog object address.
 476 *
 477 * Called when a driver has timed out suspending or resuming.
 478 * There's not much we can do here to recover so panic() to
 479 * capture a crash-dump in pstore.
 480 */
 481static void dpm_watchdog_handler(struct timer_list *t)
 482{
 483        struct dpm_watchdog *wd = from_timer(wd, t, timer);
 484
 485        dev_emerg(wd->dev, "**** DPM device timeout ****\n");
 486        show_stack(wd->tsk, NULL);
 487        panic("%s %s: unrecoverable failure\n",
 488                dev_driver_string(wd->dev), dev_name(wd->dev));
 489}
 490
 491/**
 492 * dpm_watchdog_set - Enable pm watchdog for given device.
 493 * @wd: Watchdog. Must be allocated on the stack.
 494 * @dev: Device to handle.
 495 */
 496static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
 497{
 498        struct timer_list *timer = &wd->timer;
 499
 500        wd->dev = dev;
 501        wd->tsk = current;
 502
 503        timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
 504        /* use same timeout value for both suspend and resume */
 505        timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
 506        add_timer(timer);
 507}
 508
 509/**
 510 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 511 * @wd: Watchdog to disable.
 512 */
 513static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 514{
 515        struct timer_list *timer = &wd->timer;
 516
 517        del_timer_sync(timer);
 518        destroy_timer_on_stack(timer);
 519}
 520#else
 521#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
 522#define dpm_watchdog_set(x, y)
 523#define dpm_watchdog_clear(x)
 524#endif
 525
 526/*------------------------- Resume routines -------------------------*/
 527
 528/**
 529 * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
 530 * @dev: Target device.
 531 *
 532 * Make the core skip the "early resume" and "resume" phases for @dev.
 533 *
 534 * This function can be called by middle-layer code during the "noirq" phase of
 535 * system resume if necessary, but not by device drivers.
 536 */
 537void dev_pm_skip_next_resume_phases(struct device *dev)
 538{
 539        dev->power.is_late_suspended = false;
 540        dev->power.is_suspended = false;
 541}
 542
 543/**
 544 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 545 * @dev: Device to handle.
 546 * @state: PM transition of the system being carried out.
 547 * @async: If true, the device is being resumed asynchronously.
 548 *
 549 * The driver of @dev will not receive interrupts while this function is being
 550 * executed.
 551 */
 552static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 553{
 554        pm_callback_t callback = NULL;
 555        const char *info = NULL;
 556        int error = 0;
 557
 558        TRACE_DEVICE(dev);
 559        TRACE_RESUME(0);
 560
 561        if (dev->power.syscore || dev->power.direct_complete)
 562                goto Out;
 563
 564        if (!dev->power.is_noirq_suspended)
 565                goto Out;
 566
 567        dpm_wait_for_superior(dev, async);
 568
 569        if (dev->pm_domain) {
 570                info = "noirq power domain ";
 571                callback = pm_noirq_op(&dev->pm_domain->ops, state);
 572        } else if (dev->type && dev->type->pm) {
 573                info = "noirq type ";
 574                callback = pm_noirq_op(dev->type->pm, state);
 575        } else if (dev->class && dev->class->pm) {
 576                info = "noirq class ";
 577                callback = pm_noirq_op(dev->class->pm, state);
 578        } else if (dev->bus && dev->bus->pm) {
 579                info = "noirq bus ";
 580                callback = pm_noirq_op(dev->bus->pm, state);
 581        }
 582
 583        if (!callback && dev->driver && dev->driver->pm) {
 584                info = "noirq driver ";
 585                callback = pm_noirq_op(dev->driver->pm, state);
 586        }
 587
 588        error = dpm_run_callback(callback, dev, state, info);
 589        dev->power.is_noirq_suspended = false;
 590
 591 Out:
 592        complete_all(&dev->power.completion);
 593        TRACE_RESUME(error);
 594        return error;
 595}
 596
 597static bool is_async(struct device *dev)
 598{
 599        return dev->power.async_suspend && pm_async_enabled
 600                && !pm_trace_is_enabled();
 601}
 602
 603static void async_resume_noirq(void *data, async_cookie_t cookie)
 604{
 605        struct device *dev = (struct device *)data;
 606        int error;
 607
 608        error = device_resume_noirq(dev, pm_transition, true);
 609        if (error)
 610                pm_dev_err(dev, pm_transition, " async", error);
 611
 612        put_device(dev);
 613}
 614
 615void dpm_noirq_resume_devices(pm_message_t state)
 616{
 617        struct device *dev;
 618        ktime_t starttime = ktime_get();
 619
 620        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
 621        mutex_lock(&dpm_list_mtx);
 622        pm_transition = state;
 623
 624        /*
 625         * Advanced the async threads upfront,
 626         * in case the starting of async threads is
 627         * delayed by non-async resuming devices.
 628         */
 629        list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
 630                reinit_completion(&dev->power.completion);
 631                if (is_async(dev)) {
 632                        get_device(dev);
 633                        async_schedule(async_resume_noirq, dev);
 634                }
 635        }
 636
 637        while (!list_empty(&dpm_noirq_list)) {
 638                dev = to_device(dpm_noirq_list.next);
 639                get_device(dev);
 640                list_move_tail(&dev->power.entry, &dpm_late_early_list);
 641                mutex_unlock(&dpm_list_mtx);
 642
 643                if (!is_async(dev)) {
 644                        int error;
 645
 646                        error = device_resume_noirq(dev, state, false);
 647                        if (error) {
 648                                suspend_stats.failed_resume_noirq++;
 649                                dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 650                                dpm_save_failed_dev(dev_name(dev));
 651                                pm_dev_err(dev, state, " noirq", error);
 652                        }
 653                }
 654
 655                mutex_lock(&dpm_list_mtx);
 656                put_device(dev);
 657        }
 658        mutex_unlock(&dpm_list_mtx);
 659        async_synchronize_full();
 660        dpm_show_time(starttime, state, 0, "noirq");
 661        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
 662}
 663
 664void dpm_noirq_end(void)
 665{
 666        resume_device_irqs();
 667        device_wakeup_disarm_wake_irqs();
 668        cpuidle_resume();
 669}
 670
 671/**
 672 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 673 * @state: PM transition of the system being carried out.
 674 *
 675 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 676 * allow device drivers' interrupt handlers to be called.
 677 */
 678void dpm_resume_noirq(pm_message_t state)
 679{
 680        dpm_noirq_resume_devices(state);
 681        dpm_noirq_end();
 682}
 683
 684/**
 685 * device_resume_early - Execute an "early resume" callback for given device.
 686 * @dev: Device to handle.
 687 * @state: PM transition of the system being carried out.
 688 * @async: If true, the device is being resumed asynchronously.
 689 *
 690 * Runtime PM is disabled for @dev while this function is being executed.
 691 */
 692static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 693{
 694        pm_callback_t callback = NULL;
 695        const char *info = NULL;
 696        int error = 0;
 697
 698        TRACE_DEVICE(dev);
 699        TRACE_RESUME(0);
 700
 701        if (dev->power.syscore || dev->power.direct_complete)
 702                goto Out;
 703
 704        if (!dev->power.is_late_suspended)
 705                goto Out;
 706
 707        dpm_wait_for_superior(dev, async);
 708
 709        if (dev->pm_domain) {
 710                info = "early power domain ";
 711                callback = pm_late_early_op(&dev->pm_domain->ops, state);
 712        } else if (dev->type && dev->type->pm) {
 713                info = "early type ";
 714                callback = pm_late_early_op(dev->type->pm, state);
 715        } else if (dev->class && dev->class->pm) {
 716                info = "early class ";
 717                callback = pm_late_early_op(dev->class->pm, state);
 718        } else if (dev->bus && dev->bus->pm) {
 719                info = "early bus ";
 720                callback = pm_late_early_op(dev->bus->pm, state);
 721        }
 722
 723        if (!callback && dev->driver && dev->driver->pm) {
 724                info = "early driver ";
 725                callback = pm_late_early_op(dev->driver->pm, state);
 726        }
 727
 728        error = dpm_run_callback(callback, dev, state, info);
 729        dev->power.is_late_suspended = false;
 730
 731 Out:
 732        TRACE_RESUME(error);
 733
 734        pm_runtime_enable(dev);
 735        complete_all(&dev->power.completion);
 736        return error;
 737}
 738
 739static void async_resume_early(void *data, async_cookie_t cookie)
 740{
 741        struct device *dev = (struct device *)data;
 742        int error;
 743
 744        error = device_resume_early(dev, pm_transition, true);
 745        if (error)
 746                pm_dev_err(dev, pm_transition, " async", error);
 747
 748        put_device(dev);
 749}
 750
 751/**
 752 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 753 * @state: PM transition of the system being carried out.
 754 */
 755void dpm_resume_early(pm_message_t state)
 756{
 757        struct device *dev;
 758        ktime_t starttime = ktime_get();
 759
 760        trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
 761        mutex_lock(&dpm_list_mtx);
 762        pm_transition = state;
 763
 764        /*
 765         * Advanced the async threads upfront,
 766         * in case the starting of async threads is
 767         * delayed by non-async resuming devices.
 768         */
 769        list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
 770                reinit_completion(&dev->power.completion);
 771                if (is_async(dev)) {
 772                        get_device(dev);
 773                        async_schedule(async_resume_early, dev);
 774                }
 775        }
 776
 777        while (!list_empty(&dpm_late_early_list)) {
 778                dev = to_device(dpm_late_early_list.next);
 779                get_device(dev);
 780                list_move_tail(&dev->power.entry, &dpm_suspended_list);
 781                mutex_unlock(&dpm_list_mtx);
 782
 783                if (!is_async(dev)) {
 784                        int error;
 785
 786                        error = device_resume_early(dev, state, false);
 787                        if (error) {
 788                                suspend_stats.failed_resume_early++;
 789                                dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 790                                dpm_save_failed_dev(dev_name(dev));
 791                                pm_dev_err(dev, state, " early", error);
 792                        }
 793                }
 794                mutex_lock(&dpm_list_mtx);
 795                put_device(dev);
 796        }
 797        mutex_unlock(&dpm_list_mtx);
 798        async_synchronize_full();
 799        dpm_show_time(starttime, state, 0, "early");
 800        trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
 801}
 802
 803/**
 804 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 805 * @state: PM transition of the system being carried out.
 806 */
 807void dpm_resume_start(pm_message_t state)
 808{
 809        dpm_resume_noirq(state);
 810        dpm_resume_early(state);
 811}
 812EXPORT_SYMBOL_GPL(dpm_resume_start);
 813
 814/**
 815 * device_resume - Execute "resume" callbacks for given device.
 816 * @dev: Device to handle.
 817 * @state: PM transition of the system being carried out.
 818 * @async: If true, the device is being resumed asynchronously.
 819 */
 820static int device_resume(struct device *dev, pm_message_t state, bool async)
 821{
 822        pm_callback_t callback = NULL;
 823        const char *info = NULL;
 824        int error = 0;
 825        DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 826
 827        TRACE_DEVICE(dev);
 828        TRACE_RESUME(0);
 829
 830        if (dev->power.syscore)
 831                goto Complete;
 832
 833        if (dev->power.direct_complete) {
 834                /* Match the pm_runtime_disable() in __device_suspend(). */
 835                pm_runtime_enable(dev);
 836                goto Complete;
 837        }
 838
 839        dpm_wait_for_superior(dev, async);
 840        dpm_watchdog_set(&wd, dev);
 841        device_lock(dev);
 842
 843        /*
 844         * This is a fib.  But we'll allow new children to be added below
 845         * a resumed device, even if the device hasn't been completed yet.
 846         */
 847        dev->power.is_prepared = false;
 848
 849        if (!dev->power.is_suspended)
 850                goto Unlock;
 851
 852        if (dev->pm_domain) {
 853                info = "power domain ";
 854                callback = pm_op(&dev->pm_domain->ops, state);
 855                goto Driver;
 856        }
 857
 858        if (dev->type && dev->type->pm) {
 859                info = "type ";
 860                callback = pm_op(dev->type->pm, state);
 861                goto Driver;
 862        }
 863
 864        if (dev->class && dev->class->pm) {
 865                info = "class ";
 866                callback = pm_op(dev->class->pm, state);
 867                goto Driver;
 868        }
 869
 870        if (dev->bus) {
 871                if (dev->bus->pm) {
 872                        info = "bus ";
 873                        callback = pm_op(dev->bus->pm, state);
 874                } else if (dev->bus->resume) {
 875                        info = "legacy bus ";
 876                        callback = dev->bus->resume;
 877                        goto End;
 878                }
 879        }
 880
 881 Driver:
 882        if (!callback && dev->driver && dev->driver->pm) {
 883                info = "driver ";
 884                callback = pm_op(dev->driver->pm, state);
 885        }
 886
 887 End:
 888        error = dpm_run_callback(callback, dev, state, info);
 889        dev->power.is_suspended = false;
 890
 891 Unlock:
 892        device_unlock(dev);
 893        dpm_watchdog_clear(&wd);
 894
 895 Complete:
 896        complete_all(&dev->power.completion);
 897
 898        TRACE_RESUME(error);
 899
 900        return error;
 901}
 902
 903static void async_resume(void *data, async_cookie_t cookie)
 904{
 905        struct device *dev = (struct device *)data;
 906        int error;
 907
 908        error = device_resume(dev, pm_transition, true);
 909        if (error)
 910                pm_dev_err(dev, pm_transition, " async", error);
 911        put_device(dev);
 912}
 913
 914/**
 915 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 916 * @state: PM transition of the system being carried out.
 917 *
 918 * Execute the appropriate "resume" callback for all devices whose status
 919 * indicates that they are suspended.
 920 */
 921void dpm_resume(pm_message_t state)
 922{
 923        struct device *dev;
 924        ktime_t starttime = ktime_get();
 925
 926        trace_suspend_resume(TPS("dpm_resume"), state.event, true);
 927        might_sleep();
 928
 929        mutex_lock(&dpm_list_mtx);
 930        pm_transition = state;
 931        async_error = 0;
 932
 933        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
 934                reinit_completion(&dev->power.completion);
 935                if (is_async(dev)) {
 936                        get_device(dev);
 937                        async_schedule(async_resume, dev);
 938                }
 939        }
 940
 941        while (!list_empty(&dpm_suspended_list)) {
 942                dev = to_device(dpm_suspended_list.next);
 943                get_device(dev);
 944                if (!is_async(dev)) {
 945                        int error;
 946
 947                        mutex_unlock(&dpm_list_mtx);
 948
 949                        error = device_resume(dev, state, false);
 950                        if (error) {
 951                                suspend_stats.failed_resume++;
 952                                dpm_save_failed_step(SUSPEND_RESUME);
 953                                dpm_save_failed_dev(dev_name(dev));
 954                                pm_dev_err(dev, state, "", error);
 955                        }
 956
 957                        mutex_lock(&dpm_list_mtx);
 958                }
 959                if (!list_empty(&dev->power.entry))
 960                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
 961                put_device(dev);
 962        }
 963        mutex_unlock(&dpm_list_mtx);
 964        async_synchronize_full();
 965        dpm_show_time(starttime, state, 0, NULL);
 966
 967        cpufreq_resume();
 968        trace_suspend_resume(TPS("dpm_resume"), state.event, false);
 969}
 970
 971/**
 972 * device_complete - Complete a PM transition for given device.
 973 * @dev: Device to handle.
 974 * @state: PM transition of the system being carried out.
 975 */
 976static void device_complete(struct device *dev, pm_message_t state)
 977{
 978        void (*callback)(struct device *) = NULL;
 979        const char *info = NULL;
 980
 981        if (dev->power.syscore)
 982                return;
 983
 984        device_lock(dev);
 985
 986        if (dev->pm_domain) {
 987                info = "completing power domain ";
 988                callback = dev->pm_domain->ops.complete;
 989        } else if (dev->type && dev->type->pm) {
 990                info = "completing type ";
 991                callback = dev->type->pm->complete;
 992        } else if (dev->class && dev->class->pm) {
 993                info = "completing class ";
 994                callback = dev->class->pm->complete;
 995        } else if (dev->bus && dev->bus->pm) {
 996                info = "completing bus ";
 997                callback = dev->bus->pm->complete;
 998        }
 999
1000        if (!callback && dev->driver && dev->driver->pm) {
1001                info = "completing driver ";
1002                callback = dev->driver->pm->complete;
1003        }
1004
1005        if (callback) {
1006                pm_dev_dbg(dev, state, info);
1007                callback(dev);
1008        }
1009
1010        device_unlock(dev);
1011
1012        pm_runtime_put(dev);
1013}
1014
1015/**
1016 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1017 * @state: PM transition of the system being carried out.
1018 *
1019 * Execute the ->complete() callbacks for all devices whose PM status is not
1020 * DPM_ON (this allows new devices to be registered).
1021 */
1022void dpm_complete(pm_message_t state)
1023{
1024        struct list_head list;
1025
1026        trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1027        might_sleep();
1028
1029        INIT_LIST_HEAD(&list);
1030        mutex_lock(&dpm_list_mtx);
1031        while (!list_empty(&dpm_prepared_list)) {
1032                struct device *dev = to_device(dpm_prepared_list.prev);
1033
1034                get_device(dev);
1035                dev->power.is_prepared = false;
1036                list_move(&dev->power.entry, &list);
1037                mutex_unlock(&dpm_list_mtx);
1038
1039                trace_device_pm_callback_start(dev, "", state.event);
1040                device_complete(dev, state);
1041                trace_device_pm_callback_end(dev, 0);
1042
1043                mutex_lock(&dpm_list_mtx);
1044                put_device(dev);
1045        }
1046        list_splice(&list, &dpm_list);
1047        mutex_unlock(&dpm_list_mtx);
1048
1049        /* Allow device probing and trigger re-probing of deferred devices */
1050        device_unblock_probing();
1051        trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1052}
1053
1054/**
1055 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1056 * @state: PM transition of the system being carried out.
1057 *
1058 * Execute "resume" callbacks for all devices and complete the PM transition of
1059 * the system.
1060 */
1061void dpm_resume_end(pm_message_t state)
1062{
1063        dpm_resume(state);
1064        dpm_complete(state);
1065}
1066EXPORT_SYMBOL_GPL(dpm_resume_end);
1067
1068
1069/*------------------------- Suspend routines -------------------------*/
1070
1071/**
1072 * resume_event - Return a "resume" message for given "suspend" sleep state.
1073 * @sleep_state: PM message representing a sleep state.
1074 *
1075 * Return a PM message representing the resume event corresponding to given
1076 * sleep state.
1077 */
1078static pm_message_t resume_event(pm_message_t sleep_state)
1079{
1080        switch (sleep_state.event) {
1081        case PM_EVENT_SUSPEND:
1082                return PMSG_RESUME;
1083        case PM_EVENT_FREEZE:
1084        case PM_EVENT_QUIESCE:
1085                return PMSG_RECOVER;
1086        case PM_EVENT_HIBERNATE:
1087                return PMSG_RESTORE;
1088        }
1089        return PMSG_ON;
1090}
1091
1092/**
1093 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1094 * @dev: Device to handle.
1095 * @state: PM transition of the system being carried out.
1096 * @async: If true, the device is being suspended asynchronously.
1097 *
1098 * The driver of @dev will not receive interrupts while this function is being
1099 * executed.
1100 */
1101static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1102{
1103        pm_callback_t callback = NULL;
1104        const char *info = NULL;
1105        int error = 0;
1106
1107        TRACE_DEVICE(dev);
1108        TRACE_SUSPEND(0);
1109
1110        dpm_wait_for_subordinate(dev, async);
1111
1112        if (async_error)
1113                goto Complete;
1114
1115        if (pm_wakeup_pending()) {
1116                async_error = -EBUSY;
1117                goto Complete;
1118        }
1119
1120        if (dev->power.syscore || dev->power.direct_complete)
1121                goto Complete;
1122
1123        if (dev->pm_domain) {
1124                info = "noirq power domain ";
1125                callback = pm_noirq_op(&dev->pm_domain->ops, state);
1126        } else if (dev->type && dev->type->pm) {
1127                info = "noirq type ";
1128                callback = pm_noirq_op(dev->type->pm, state);
1129        } else if (dev->class && dev->class->pm) {
1130                info = "noirq class ";
1131                callback = pm_noirq_op(dev->class->pm, state);
1132        } else if (dev->bus && dev->bus->pm) {
1133                info = "noirq bus ";
1134                callback = pm_noirq_op(dev->bus->pm, state);
1135        }
1136
1137        if (!callback && dev->driver && dev->driver->pm) {
1138                info = "noirq driver ";
1139                callback = pm_noirq_op(dev->driver->pm, state);
1140        }
1141
1142        error = dpm_run_callback(callback, dev, state, info);
1143        if (!error)
1144                dev->power.is_noirq_suspended = true;
1145        else
1146                async_error = error;
1147
1148Complete:
1149        complete_all(&dev->power.completion);
1150        TRACE_SUSPEND(error);
1151        return error;
1152}
1153
1154static void async_suspend_noirq(void *data, async_cookie_t cookie)
1155{
1156        struct device *dev = (struct device *)data;
1157        int error;
1158
1159        error = __device_suspend_noirq(dev, pm_transition, true);
1160        if (error) {
1161                dpm_save_failed_dev(dev_name(dev));
1162                pm_dev_err(dev, pm_transition, " async", error);
1163        }
1164
1165        put_device(dev);
1166}
1167
1168static int device_suspend_noirq(struct device *dev)
1169{
1170        reinit_completion(&dev->power.completion);
1171
1172        if (is_async(dev)) {
1173                get_device(dev);
1174                async_schedule(async_suspend_noirq, dev);
1175                return 0;
1176        }
1177        return __device_suspend_noirq(dev, pm_transition, false);
1178}
1179
1180void dpm_noirq_begin(void)
1181{
1182        cpuidle_pause();
1183        device_wakeup_arm_wake_irqs();
1184        suspend_device_irqs();
1185}
1186
1187int dpm_noirq_suspend_devices(pm_message_t state)
1188{
1189        ktime_t starttime = ktime_get();
1190        int error = 0;
1191
1192        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1193        mutex_lock(&dpm_list_mtx);
1194        pm_transition = state;
1195        async_error = 0;
1196
1197        while (!list_empty(&dpm_late_early_list)) {
1198                struct device *dev = to_device(dpm_late_early_list.prev);
1199
1200                get_device(dev);
1201                mutex_unlock(&dpm_list_mtx);
1202
1203                error = device_suspend_noirq(dev);
1204
1205                mutex_lock(&dpm_list_mtx);
1206                if (error) {
1207                        pm_dev_err(dev, state, " noirq", error);
1208                        dpm_save_failed_dev(dev_name(dev));
1209                        put_device(dev);
1210                        break;
1211                }
1212                if (!list_empty(&dev->power.entry))
1213                        list_move(&dev->power.entry, &dpm_noirq_list);
1214                put_device(dev);
1215
1216                if (async_error)
1217                        break;
1218        }
1219        mutex_unlock(&dpm_list_mtx);
1220        async_synchronize_full();
1221        if (!error)
1222                error = async_error;
1223
1224        if (error) {
1225                suspend_stats.failed_suspend_noirq++;
1226                dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1227        }
1228        dpm_show_time(starttime, state, error, "noirq");
1229        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1230        return error;
1231}
1232
1233/**
1234 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1235 * @state: PM transition of the system being carried out.
1236 *
1237 * Prevent device drivers' interrupt handlers from being called and invoke
1238 * "noirq" suspend callbacks for all non-sysdev devices.
1239 */
1240int dpm_suspend_noirq(pm_message_t state)
1241{
1242        int ret;
1243
1244        dpm_noirq_begin();
1245        ret = dpm_noirq_suspend_devices(state);
1246        if (ret)
1247                dpm_resume_noirq(resume_event(state));
1248
1249        return ret;
1250}
1251
1252/**
1253 * __device_suspend_late - Execute a "late suspend" callback for given device.
1254 * @dev: Device to handle.
1255 * @state: PM transition of the system being carried out.
1256 * @async: If true, the device is being suspended asynchronously.
1257 *
1258 * Runtime PM is disabled for @dev while this function is being executed.
1259 */
1260static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1261{
1262        pm_callback_t callback = NULL;
1263        const char *info = NULL;
1264        int error = 0;
1265
1266        TRACE_DEVICE(dev);
1267        TRACE_SUSPEND(0);
1268
1269        __pm_runtime_disable(dev, false);
1270
1271        dpm_wait_for_subordinate(dev, async);
1272
1273        if (async_error)
1274                goto Complete;
1275
1276        if (pm_wakeup_pending()) {
1277                async_error = -EBUSY;
1278                goto Complete;
1279        }
1280
1281        if (dev->power.syscore || dev->power.direct_complete)
1282                goto Complete;
1283
1284        if (dev->pm_domain) {
1285                info = "late power domain ";
1286                callback = pm_late_early_op(&dev->pm_domain->ops, state);
1287        } else if (dev->type && dev->type->pm) {
1288                info = "late type ";
1289                callback = pm_late_early_op(dev->type->pm, state);
1290        } else if (dev->class && dev->class->pm) {
1291                info = "late class ";
1292                callback = pm_late_early_op(dev->class->pm, state);
1293        } else if (dev->bus && dev->bus->pm) {
1294                info = "late bus ";
1295                callback = pm_late_early_op(dev->bus->pm, state);
1296        }
1297
1298        if (!callback && dev->driver && dev->driver->pm) {
1299                info = "late driver ";
1300                callback = pm_late_early_op(dev->driver->pm, state);
1301        }
1302
1303        error = dpm_run_callback(callback, dev, state, info);
1304        if (!error)
1305                dev->power.is_late_suspended = true;
1306        else
1307                async_error = error;
1308
1309Complete:
1310        TRACE_SUSPEND(error);
1311        complete_all(&dev->power.completion);
1312        return error;
1313}
1314
1315static void async_suspend_late(void *data, async_cookie_t cookie)
1316{
1317        struct device *dev = (struct device *)data;
1318        int error;
1319
1320        error = __device_suspend_late(dev, pm_transition, true);
1321        if (error) {
1322                dpm_save_failed_dev(dev_name(dev));
1323                pm_dev_err(dev, pm_transition, " async", error);
1324        }
1325        put_device(dev);
1326}
1327
1328static int device_suspend_late(struct device *dev)
1329{
1330        reinit_completion(&dev->power.completion);
1331
1332        if (is_async(dev)) {
1333                get_device(dev);
1334                async_schedule(async_suspend_late, dev);
1335                return 0;
1336        }
1337
1338        return __device_suspend_late(dev, pm_transition, false);
1339}
1340
1341/**
1342 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1343 * @state: PM transition of the system being carried out.
1344 */
1345int dpm_suspend_late(pm_message_t state)
1346{
1347        ktime_t starttime = ktime_get();
1348        int error = 0;
1349
1350        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1351        mutex_lock(&dpm_list_mtx);
1352        pm_transition = state;
1353        async_error = 0;
1354
1355        while (!list_empty(&dpm_suspended_list)) {
1356                struct device *dev = to_device(dpm_suspended_list.prev);
1357
1358                get_device(dev);
1359                mutex_unlock(&dpm_list_mtx);
1360
1361                error = device_suspend_late(dev);
1362
1363                mutex_lock(&dpm_list_mtx);
1364                if (!list_empty(&dev->power.entry))
1365                        list_move(&dev->power.entry, &dpm_late_early_list);
1366
1367                if (error) {
1368                        pm_dev_err(dev, state, " late", error);
1369                        dpm_save_failed_dev(dev_name(dev));
1370                        put_device(dev);
1371                        break;
1372                }
1373                put_device(dev);
1374
1375                if (async_error)
1376                        break;
1377        }
1378        mutex_unlock(&dpm_list_mtx);
1379        async_synchronize_full();
1380        if (!error)
1381                error = async_error;
1382        if (error) {
1383                suspend_stats.failed_suspend_late++;
1384                dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1385                dpm_resume_early(resume_event(state));
1386        }
1387        dpm_show_time(starttime, state, error, "late");
1388        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1389        return error;
1390}
1391
1392/**
1393 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1394 * @state: PM transition of the system being carried out.
1395 */
1396int dpm_suspend_end(pm_message_t state)
1397{
1398        int error = dpm_suspend_late(state);
1399        if (error)
1400                return error;
1401
1402        error = dpm_suspend_noirq(state);
1403        if (error) {
1404                dpm_resume_early(resume_event(state));
1405                return error;
1406        }
1407
1408        return 0;
1409}
1410EXPORT_SYMBOL_GPL(dpm_suspend_end);
1411
1412/**
1413 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1414 * @dev: Device to suspend.
1415 * @state: PM transition of the system being carried out.
1416 * @cb: Suspend callback to execute.
1417 * @info: string description of caller.
1418 */
1419static int legacy_suspend(struct device *dev, pm_message_t state,
1420                          int (*cb)(struct device *dev, pm_message_t state),
1421                          const char *info)
1422{
1423        int error;
1424        ktime_t calltime;
1425
1426        calltime = initcall_debug_start(dev);
1427
1428        trace_device_pm_callback_start(dev, info, state.event);
1429        error = cb(dev, state);
1430        trace_device_pm_callback_end(dev, error);
1431        suspend_report_result(cb, error);
1432
1433        initcall_debug_report(dev, calltime, error, state, info);
1434
1435        return error;
1436}
1437
1438static void dpm_clear_suppliers_direct_complete(struct device *dev)
1439{
1440        struct device_link *link;
1441        int idx;
1442
1443        idx = device_links_read_lock();
1444
1445        list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1446                spin_lock_irq(&link->supplier->power.lock);
1447                link->supplier->power.direct_complete = false;
1448                spin_unlock_irq(&link->supplier->power.lock);
1449        }
1450
1451        device_links_read_unlock(idx);
1452}
1453
1454/**
1455 * __device_suspend - Execute "suspend" callbacks for given device.
1456 * @dev: Device to handle.
1457 * @state: PM transition of the system being carried out.
1458 * @async: If true, the device is being suspended asynchronously.
1459 */
1460static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1461{
1462        pm_callback_t callback = NULL;
1463        const char *info = NULL;
1464        int error = 0;
1465        DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1466
1467        TRACE_DEVICE(dev);
1468        TRACE_SUSPEND(0);
1469
1470        dpm_wait_for_subordinate(dev, async);
1471
1472        if (async_error)
1473                goto Complete;
1474
1475        /*
1476         * If a device configured to wake up the system from sleep states
1477         * has been suspended at run time and there's a resume request pending
1478         * for it, this is equivalent to the device signaling wakeup, so the
1479         * system suspend operation should be aborted.
1480         */
1481        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1482                pm_wakeup_event(dev, 0);
1483
1484        if (pm_wakeup_pending()) {
1485                async_error = -EBUSY;
1486                goto Complete;
1487        }
1488
1489        if (dev->power.syscore)
1490                goto Complete;
1491
1492        if (dev->power.direct_complete) {
1493                if (pm_runtime_status_suspended(dev)) {
1494                        pm_runtime_disable(dev);
1495                        if (pm_runtime_status_suspended(dev))
1496                                goto Complete;
1497
1498                        pm_runtime_enable(dev);
1499                }
1500                dev->power.direct_complete = false;
1501        }
1502
1503        dpm_watchdog_set(&wd, dev);
1504        device_lock(dev);
1505
1506        if (dev->pm_domain) {
1507                info = "power domain ";
1508                callback = pm_op(&dev->pm_domain->ops, state);
1509                goto Run;
1510        }
1511
1512        if (dev->type && dev->type->pm) {
1513                info = "type ";
1514                callback = pm_op(dev->type->pm, state);
1515                goto Run;
1516        }
1517
1518        if (dev->class && dev->class->pm) {
1519                info = "class ";
1520                callback = pm_op(dev->class->pm, state);
1521                goto Run;
1522        }
1523
1524        if (dev->bus) {
1525                if (dev->bus->pm) {
1526                        info = "bus ";
1527                        callback = pm_op(dev->bus->pm, state);
1528                } else if (dev->bus->suspend) {
1529                        pm_dev_dbg(dev, state, "legacy bus ");
1530                        error = legacy_suspend(dev, state, dev->bus->suspend,
1531                                                "legacy bus ");
1532                        goto End;
1533                }
1534        }
1535
1536 Run:
1537        if (!callback && dev->driver && dev->driver->pm) {
1538                info = "driver ";
1539                callback = pm_op(dev->driver->pm, state);
1540        }
1541
1542        error = dpm_run_callback(callback, dev, state, info);
1543
1544 End:
1545        if (!error) {
1546                struct device *parent = dev->parent;
1547
1548                dev->power.is_suspended = true;
1549                if (parent) {
1550                        spin_lock_irq(&parent->power.lock);
1551
1552                        dev->parent->power.direct_complete = false;
1553                        if (dev->power.wakeup_path
1554                            && !dev->parent->power.ignore_children)
1555                                dev->parent->power.wakeup_path = true;
1556
1557                        spin_unlock_irq(&parent->power.lock);
1558                }
1559                dpm_clear_suppliers_direct_complete(dev);
1560        }
1561
1562        device_unlock(dev);
1563        dpm_watchdog_clear(&wd);
1564
1565 Complete:
1566        if (error)
1567                async_error = error;
1568
1569        complete_all(&dev->power.completion);
1570        TRACE_SUSPEND(error);
1571        return error;
1572}
1573
1574static void async_suspend(void *data, async_cookie_t cookie)
1575{
1576        struct device *dev = (struct device *)data;
1577        int error;
1578
1579        error = __device_suspend(dev, pm_transition, true);
1580        if (error) {
1581                dpm_save_failed_dev(dev_name(dev));
1582                pm_dev_err(dev, pm_transition, " async", error);
1583        }
1584
1585        put_device(dev);
1586}
1587
1588static int device_suspend(struct device *dev)
1589{
1590        reinit_completion(&dev->power.completion);
1591
1592        if (is_async(dev)) {
1593                get_device(dev);
1594                async_schedule(async_suspend, dev);
1595                return 0;
1596        }
1597
1598        return __device_suspend(dev, pm_transition, false);
1599}
1600
1601/**
1602 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1603 * @state: PM transition of the system being carried out.
1604 */
1605int dpm_suspend(pm_message_t state)
1606{
1607        ktime_t starttime = ktime_get();
1608        int error = 0;
1609
1610        trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1611        might_sleep();
1612
1613        cpufreq_suspend();
1614
1615        mutex_lock(&dpm_list_mtx);
1616        pm_transition = state;
1617        async_error = 0;
1618        while (!list_empty(&dpm_prepared_list)) {
1619                struct device *dev = to_device(dpm_prepared_list.prev);
1620
1621                get_device(dev);
1622                mutex_unlock(&dpm_list_mtx);
1623
1624                error = device_suspend(dev);
1625
1626                mutex_lock(&dpm_list_mtx);
1627                if (error) {
1628                        pm_dev_err(dev, state, "", error);
1629                        dpm_save_failed_dev(dev_name(dev));
1630                        put_device(dev);
1631                        break;
1632                }
1633                if (!list_empty(&dev->power.entry))
1634                        list_move(&dev->power.entry, &dpm_suspended_list);
1635                put_device(dev);
1636                if (async_error)
1637                        break;
1638        }
1639        mutex_unlock(&dpm_list_mtx);
1640        async_synchronize_full();
1641        if (!error)
1642                error = async_error;
1643        if (error) {
1644                suspend_stats.failed_suspend++;
1645                dpm_save_failed_step(SUSPEND_SUSPEND);
1646        }
1647        dpm_show_time(starttime, state, error, NULL);
1648        trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1649        return error;
1650}
1651
1652/**
1653 * device_prepare - Prepare a device for system power transition.
1654 * @dev: Device to handle.
1655 * @state: PM transition of the system being carried out.
1656 *
1657 * Execute the ->prepare() callback(s) for given device.  No new children of the
1658 * device may be registered after this function has returned.
1659 */
1660static int device_prepare(struct device *dev, pm_message_t state)
1661{
1662        int (*callback)(struct device *) = NULL;
1663        int ret = 0;
1664
1665        if (dev->power.syscore)
1666                return 0;
1667
1668        WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
1669                !pm_runtime_enabled(dev));
1670
1671        /*
1672         * If a device's parent goes into runtime suspend at the wrong time,
1673         * it won't be possible to resume the device.  To prevent this we
1674         * block runtime suspend here, during the prepare phase, and allow
1675         * it again during the complete phase.
1676         */
1677        pm_runtime_get_noresume(dev);
1678
1679        device_lock(dev);
1680
1681        dev->power.wakeup_path = device_may_wakeup(dev);
1682
1683        if (dev->power.no_pm_callbacks) {
1684                ret = 1;        /* Let device go direct_complete */
1685                goto unlock;
1686        }
1687
1688        if (dev->pm_domain)
1689                callback = dev->pm_domain->ops.prepare;
1690        else if (dev->type && dev->type->pm)
1691                callback = dev->type->pm->prepare;
1692        else if (dev->class && dev->class->pm)
1693                callback = dev->class->pm->prepare;
1694        else if (dev->bus && dev->bus->pm)
1695                callback = dev->bus->pm->prepare;
1696
1697        if (!callback && dev->driver && dev->driver->pm)
1698                callback = dev->driver->pm->prepare;
1699
1700        if (callback)
1701                ret = callback(dev);
1702
1703unlock:
1704        device_unlock(dev);
1705
1706        if (ret < 0) {
1707                suspend_report_result(callback, ret);
1708                pm_runtime_put(dev);
1709                return ret;
1710        }
1711        /*
1712         * A positive return value from ->prepare() means "this device appears
1713         * to be runtime-suspended and its state is fine, so if it really is
1714         * runtime-suspended, you can leave it in that state provided that you
1715         * will do the same thing with all of its descendants".  This only
1716         * applies to suspend transitions, however.
1717         */
1718        spin_lock_irq(&dev->power.lock);
1719        dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1720                pm_runtime_suspended(dev) && ret > 0 &&
1721                !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1722        spin_unlock_irq(&dev->power.lock);
1723        return 0;
1724}
1725
1726/**
1727 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1728 * @state: PM transition of the system being carried out.
1729 *
1730 * Execute the ->prepare() callback(s) for all devices.
1731 */
1732int dpm_prepare(pm_message_t state)
1733{
1734        int error = 0;
1735
1736        trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1737        might_sleep();
1738
1739        /*
1740         * Give a chance for the known devices to complete their probes, before
1741         * disable probing of devices. This sync point is important at least
1742         * at boot time + hibernation restore.
1743         */
1744        wait_for_device_probe();
1745        /*
1746         * It is unsafe if probing of devices will happen during suspend or
1747         * hibernation and system behavior will be unpredictable in this case.
1748         * So, let's prohibit device's probing here and defer their probes
1749         * instead. The normal behavior will be restored in dpm_complete().
1750         */
1751        device_block_probing();
1752
1753        mutex_lock(&dpm_list_mtx);
1754        while (!list_empty(&dpm_list)) {
1755                struct device *dev = to_device(dpm_list.next);
1756
1757                get_device(dev);
1758                mutex_unlock(&dpm_list_mtx);
1759
1760                trace_device_pm_callback_start(dev, "", state.event);
1761                error = device_prepare(dev, state);
1762                trace_device_pm_callback_end(dev, error);
1763
1764                mutex_lock(&dpm_list_mtx);
1765                if (error) {
1766                        if (error == -EAGAIN) {
1767                                put_device(dev);
1768                                error = 0;
1769                                continue;
1770                        }
1771                        printk(KERN_INFO "PM: Device %s not prepared "
1772                                "for power transition: code %d\n",
1773                                dev_name(dev), error);
1774                        put_device(dev);
1775                        break;
1776                }
1777                dev->power.is_prepared = true;
1778                if (!list_empty(&dev->power.entry))
1779                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
1780                put_device(dev);
1781        }
1782        mutex_unlock(&dpm_list_mtx);
1783        trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1784        return error;
1785}
1786
1787/**
1788 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1789 * @state: PM transition of the system being carried out.
1790 *
1791 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1792 * callbacks for them.
1793 */
1794int dpm_suspend_start(pm_message_t state)
1795{
1796        int error;
1797
1798        error = dpm_prepare(state);
1799        if (error) {
1800                suspend_stats.failed_prepare++;
1801                dpm_save_failed_step(SUSPEND_PREPARE);
1802        } else
1803                error = dpm_suspend(state);
1804        return error;
1805}
1806EXPORT_SYMBOL_GPL(dpm_suspend_start);
1807
1808void __suspend_report_result(const char *function, void *fn, int ret)
1809{
1810        if (ret)
1811                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1812}
1813EXPORT_SYMBOL_GPL(__suspend_report_result);
1814
1815/**
1816 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1817 * @dev: Device to wait for.
1818 * @subordinate: Device that needs to wait for @dev.
1819 */
1820int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1821{
1822        dpm_wait(dev, subordinate->power.async_suspend);
1823        return async_error;
1824}
1825EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1826
1827/**
1828 * dpm_for_each_dev - device iterator.
1829 * @data: data for the callback.
1830 * @fn: function to be called for each device.
1831 *
1832 * Iterate over devices in dpm_list, and call @fn for each device,
1833 * passing it @data.
1834 */
1835void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1836{
1837        struct device *dev;
1838
1839        if (!fn)
1840                return;
1841
1842        device_pm_lock();
1843        list_for_each_entry(dev, &dpm_list, power.entry)
1844                fn(dev, data);
1845        device_pm_unlock();
1846}
1847EXPORT_SYMBOL_GPL(dpm_for_each_dev);
1848
1849static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1850{
1851        if (!ops)
1852                return true;
1853
1854        return !ops->prepare &&
1855               !ops->suspend &&
1856               !ops->suspend_late &&
1857               !ops->suspend_noirq &&
1858               !ops->resume_noirq &&
1859               !ops->resume_early &&
1860               !ops->resume &&
1861               !ops->complete;
1862}
1863
1864void device_pm_check_callbacks(struct device *dev)
1865{
1866        spin_lock_irq(&dev->power.lock);
1867        dev->power.no_pm_callbacks =
1868                (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
1869                 !dev->bus->suspend && !dev->bus->resume)) &&
1870                (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
1871                (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
1872                (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
1873                (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
1874                 !dev->driver->suspend && !dev->driver->resume));
1875        spin_unlock_irq(&dev->power.lock);
1876}
1877
1878bool dev_pm_smart_suspend_and_suspended(struct device *dev)
1879{
1880        return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
1881                pm_runtime_status_suspended(dev);
1882}
1883