linux/drivers/base/power/main.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * drivers/base/power/main.c - Where the driver meets power management.
   4 *
   5 * Copyright (c) 2003 Patrick Mochel
   6 * Copyright (c) 2003 Open Source Development Lab
   7 *
   8 * The driver model core calls device_pm_add() when a device is registered.
   9 * This will initialize the embedded device_pm_info object in the device
  10 * and add it to the list of power-controlled devices. sysfs entries for
  11 * controlling device power management will also be added.
  12 *
  13 * A separate list is used for keeping track of power info, because the power
  14 * domain dependencies may differ from the ancestral dependencies that the
  15 * subsystem list maintains.
  16 */
  17
  18#define pr_fmt(fmt) "PM: " fmt
  19
  20#include <linux/device.h>
  21#include <linux/export.h>
  22#include <linux/mutex.h>
  23#include <linux/pm.h>
  24#include <linux/pm_runtime.h>
  25#include <linux/pm-trace.h>
  26#include <linux/pm_wakeirq.h>
  27#include <linux/interrupt.h>
  28#include <linux/sched.h>
  29#include <linux/sched/debug.h>
  30#include <linux/async.h>
  31#include <linux/suspend.h>
  32#include <trace/events/power.h>
  33#include <linux/cpufreq.h>
  34#include <linux/cpuidle.h>
  35#include <linux/devfreq.h>
  36#include <linux/timer.h>
  37
  38#include "../base.h"
  39#include "power.h"
  40
  41typedef int (*pm_callback_t)(struct device *);
  42
  43#define list_for_each_entry_rcu_locked(pos, head, member) \
  44        list_for_each_entry_rcu(pos, head, member, \
  45                        device_links_read_lock_held())
  46
  47/*
  48 * The entries in dpm_list are in depth-first order, simply
  49 * because children are guaranteed to be discovered after parents, and
  50 * are inserted at the back of the list on discovery.
  51 *
  52 * Since device_pm_add() may be called with a device lock held,
  53 * we must never try to acquire a device lock while holding
  54 * dpm_list_mtx.
  55 */
  56
  57LIST_HEAD(dpm_list);
  58static LIST_HEAD(dpm_prepared_list);
  59static LIST_HEAD(dpm_suspended_list);
  60static LIST_HEAD(dpm_late_early_list);
  61static LIST_HEAD(dpm_noirq_list);
  62
  63struct suspend_stats suspend_stats;
  64static DEFINE_MUTEX(dpm_list_mtx);
  65static pm_message_t pm_transition;
  66
  67static int async_error;
  68
  69static const char *pm_verb(int event)
  70{
  71        switch (event) {
  72        case PM_EVENT_SUSPEND:
  73                return "suspend";
  74        case PM_EVENT_RESUME:
  75                return "resume";
  76        case PM_EVENT_FREEZE:
  77                return "freeze";
  78        case PM_EVENT_QUIESCE:
  79                return "quiesce";
  80        case PM_EVENT_HIBERNATE:
  81                return "hibernate";
  82        case PM_EVENT_THAW:
  83                return "thaw";
  84        case PM_EVENT_RESTORE:
  85                return "restore";
  86        case PM_EVENT_RECOVER:
  87                return "recover";
  88        default:
  89                return "(unknown PM event)";
  90        }
  91}
  92
  93/**
  94 * device_pm_sleep_init - Initialize system suspend-related device fields.
  95 * @dev: Device object being initialized.
  96 */
  97void device_pm_sleep_init(struct device *dev)
  98{
  99        dev->power.is_prepared = false;
 100        dev->power.is_suspended = false;
 101        dev->power.is_noirq_suspended = false;
 102        dev->power.is_late_suspended = false;
 103        init_completion(&dev->power.completion);
 104        complete_all(&dev->power.completion);
 105        dev->power.wakeup = NULL;
 106        INIT_LIST_HEAD(&dev->power.entry);
 107}
 108
 109/**
 110 * device_pm_lock - Lock the list of active devices used by the PM core.
 111 */
 112void device_pm_lock(void)
 113{
 114        mutex_lock(&dpm_list_mtx);
 115}
 116
 117/**
 118 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 119 */
 120void device_pm_unlock(void)
 121{
 122        mutex_unlock(&dpm_list_mtx);
 123}
 124
 125/**
 126 * device_pm_add - Add a device to the PM core's list of active devices.
 127 * @dev: Device to add to the list.
 128 */
 129void device_pm_add(struct device *dev)
 130{
 131        /* Skip PM setup/initialization. */
 132        if (device_pm_not_required(dev))
 133                return;
 134
 135        pr_debug("Adding info for %s:%s\n",
 136                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 137        device_pm_check_callbacks(dev);
 138        mutex_lock(&dpm_list_mtx);
 139        if (dev->parent && dev->parent->power.is_prepared)
 140                dev_warn(dev, "parent %s should not be sleeping\n",
 141                        dev_name(dev->parent));
 142        list_add_tail(&dev->power.entry, &dpm_list);
 143        dev->power.in_dpm_list = true;
 144        mutex_unlock(&dpm_list_mtx);
 145}
 146
 147/**
 148 * device_pm_remove - Remove a device from the PM core's list of active devices.
 149 * @dev: Device to be removed from the list.
 150 */
 151void device_pm_remove(struct device *dev)
 152{
 153        if (device_pm_not_required(dev))
 154                return;
 155
 156        pr_debug("Removing info for %s:%s\n",
 157                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 158        complete_all(&dev->power.completion);
 159        mutex_lock(&dpm_list_mtx);
 160        list_del_init(&dev->power.entry);
 161        dev->power.in_dpm_list = false;
 162        mutex_unlock(&dpm_list_mtx);
 163        device_wakeup_disable(dev);
 164        pm_runtime_remove(dev);
 165        device_pm_check_callbacks(dev);
 166}
 167
 168/**
 169 * device_pm_move_before - Move device in the PM core's list of active devices.
 170 * @deva: Device to move in dpm_list.
 171 * @devb: Device @deva should come before.
 172 */
 173void device_pm_move_before(struct device *deva, struct device *devb)
 174{
 175        pr_debug("Moving %s:%s before %s:%s\n",
 176                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 177                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 178        /* Delete deva from dpm_list and reinsert before devb. */
 179        list_move_tail(&deva->power.entry, &devb->power.entry);
 180}
 181
 182/**
 183 * device_pm_move_after - Move device in the PM core's list of active devices.
 184 * @deva: Device to move in dpm_list.
 185 * @devb: Device @deva should come after.
 186 */
 187void device_pm_move_after(struct device *deva, struct device *devb)
 188{
 189        pr_debug("Moving %s:%s after %s:%s\n",
 190                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 191                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 192        /* Delete deva from dpm_list and reinsert after devb. */
 193        list_move(&deva->power.entry, &devb->power.entry);
 194}
 195
 196/**
 197 * device_pm_move_last - Move device to end of the PM core's list of devices.
 198 * @dev: Device to move in dpm_list.
 199 */
 200void device_pm_move_last(struct device *dev)
 201{
 202        pr_debug("Moving %s:%s to end of list\n",
 203                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 204        list_move_tail(&dev->power.entry, &dpm_list);
 205}
 206
 207static ktime_t initcall_debug_start(struct device *dev, void *cb)
 208{
 209        if (!pm_print_times_enabled)
 210                return 0;
 211
 212        dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
 213                 task_pid_nr(current),
 214                 dev->parent ? dev_name(dev->parent) : "none");
 215        return ktime_get();
 216}
 217
 218static void initcall_debug_report(struct device *dev, ktime_t calltime,
 219                                  void *cb, int error)
 220{
 221        ktime_t rettime;
 222        s64 nsecs;
 223
 224        if (!pm_print_times_enabled)
 225                return;
 226
 227        rettime = ktime_get();
 228        nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
 229
 230        dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
 231                 (unsigned long long)nsecs >> 10);
 232}
 233
 234/**
 235 * dpm_wait - Wait for a PM operation to complete.
 236 * @dev: Device to wait for.
 237 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 238 */
 239static void dpm_wait(struct device *dev, bool async)
 240{
 241        if (!dev)
 242                return;
 243
 244        if (async || (pm_async_enabled && dev->power.async_suspend))
 245                wait_for_completion(&dev->power.completion);
 246}
 247
 248static int dpm_wait_fn(struct device *dev, void *async_ptr)
 249{
 250        dpm_wait(dev, *((bool *)async_ptr));
 251        return 0;
 252}
 253
 254static void dpm_wait_for_children(struct device *dev, bool async)
 255{
 256        device_for_each_child(dev, &async, dpm_wait_fn);
 257}
 258
 259static void dpm_wait_for_suppliers(struct device *dev, bool async)
 260{
 261        struct device_link *link;
 262        int idx;
 263
 264        idx = device_links_read_lock();
 265
 266        /*
 267         * If the supplier goes away right after we've checked the link to it,
 268         * we'll wait for its completion to change the state, but that's fine,
 269         * because the only things that will block as a result are the SRCU
 270         * callbacks freeing the link objects for the links in the list we're
 271         * walking.
 272         */
 273        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
 274                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 275                        dpm_wait(link->supplier, async);
 276
 277        device_links_read_unlock(idx);
 278}
 279
 280static bool dpm_wait_for_superior(struct device *dev, bool async)
 281{
 282        struct device *parent;
 283
 284        /*
 285         * If the device is resumed asynchronously and the parent's callback
 286         * deletes both the device and the parent itself, the parent object may
 287         * be freed while this function is running, so avoid that by reference
 288         * counting the parent once more unless the device has been deleted
 289         * already (in which case return right away).
 290         */
 291        mutex_lock(&dpm_list_mtx);
 292
 293        if (!device_pm_initialized(dev)) {
 294                mutex_unlock(&dpm_list_mtx);
 295                return false;
 296        }
 297
 298        parent = get_device(dev->parent);
 299
 300        mutex_unlock(&dpm_list_mtx);
 301
 302        dpm_wait(parent, async);
 303        put_device(parent);
 304
 305        dpm_wait_for_suppliers(dev, async);
 306
 307        /*
 308         * If the parent's callback has deleted the device, attempting to resume
 309         * it would be invalid, so avoid doing that then.
 310         */
 311        return device_pm_initialized(dev);
 312}
 313
 314static void dpm_wait_for_consumers(struct device *dev, bool async)
 315{
 316        struct device_link *link;
 317        int idx;
 318
 319        idx = device_links_read_lock();
 320
 321        /*
 322         * The status of a device link can only be changed from "dormant" by a
 323         * probe, but that cannot happen during system suspend/resume.  In
 324         * theory it can change to "dormant" at that time, but then it is
 325         * reasonable to wait for the target device anyway (eg. if it goes
 326         * away, it's better to wait for it to go away completely and then
 327         * continue instead of trying to continue in parallel with its
 328         * unregistration).
 329         */
 330        list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
 331                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
 332                        dpm_wait(link->consumer, async);
 333
 334        device_links_read_unlock(idx);
 335}
 336
 337static void dpm_wait_for_subordinate(struct device *dev, bool async)
 338{
 339        dpm_wait_for_children(dev, async);
 340        dpm_wait_for_consumers(dev, async);
 341}
 342
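/*
 * Editorial illustration (not from the original file): the supplier/consumer
 * ordering that dpm_wait_for_suppliers() and dpm_wait_for_consumers() honour
 * comes from device links created elsewhere, typically by a consumer driver
 * at probe time.  A minimal sketch; foo_probe() and foo_get_supplier() are
 * hypothetical names:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct device *supplier = foo_get_supplier(dev);
 *		struct device_link *link;
 *
 *		// Any non-dormant link makes the PM core wait for the
 *		// supplier when resuming this consumer and wait for the
 *		// consumer when suspending the supplier.
 *		link = device_link_add(dev, supplier, DL_FLAG_PM_RUNTIME);
 *		if (!link)
 *			return -EINVAL;
 *
 *		return 0;
 *	}
 */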
 343/**
 344 * pm_op - Return the PM operation appropriate for given PM event.
 345 * @ops: PM operations to choose from.
 346 * @state: PM transition of the system being carried out.
 347 */
 348static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 349{
 350        switch (state.event) {
 351#ifdef CONFIG_SUSPEND
 352        case PM_EVENT_SUSPEND:
 353                return ops->suspend;
 354        case PM_EVENT_RESUME:
 355                return ops->resume;
 356#endif /* CONFIG_SUSPEND */
 357#ifdef CONFIG_HIBERNATE_CALLBACKS
 358        case PM_EVENT_FREEZE:
 359        case PM_EVENT_QUIESCE:
 360                return ops->freeze;
 361        case PM_EVENT_HIBERNATE:
 362                return ops->poweroff;
 363        case PM_EVENT_THAW:
 364        case PM_EVENT_RECOVER:
 365                return ops->thaw;
 367        case PM_EVENT_RESTORE:
 368                return ops->restore;
 369#endif /* CONFIG_HIBERNATE_CALLBACKS */
 370        }
 371
 372        return NULL;
 373}
 374
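/*
 * Editorial illustration (not from the original file): pm_op() selects one
 * member of a subsystem- or driver-provided struct dev_pm_ops based on the
 * transition.  A hypothetical driver covering both suspend and hibernation
 * might declare something like this (all foo_* names are made up):
 *
 *	static int foo_suspend(struct device *dev)  { return 0; }
 *	static int foo_resume(struct device *dev)   { return 0; }
 *	static int foo_freeze(struct device *dev)   { return 0; }
 *	static int foo_thaw(struct device *dev)     { return 0; }
 *	static int foo_poweroff(struct device *dev) { return 0; }
 *	static int foo_restore(struct device *dev)  { return 0; }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend  = foo_suspend,	// PM_EVENT_SUSPEND
 *		.resume   = foo_resume,		// PM_EVENT_RESUME
 *		.freeze   = foo_freeze,		// PM_EVENT_FREEZE/QUIESCE
 *		.thaw     = foo_thaw,		// PM_EVENT_THAW/RECOVER
 *		.poweroff = foo_poweroff,	// PM_EVENT_HIBERNATE
 *		.restore  = foo_restore,	// PM_EVENT_RESTORE
 *	};
 */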
 375/**
 376 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 377 * @ops: PM operations to choose from.
 378 * @state: PM transition of the system being carried out.
 379 *
  380 * Runtime PM is disabled for the device while the returned callback runs.
 381 */
 382static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
 383                                      pm_message_t state)
 384{
 385        switch (state.event) {
 386#ifdef CONFIG_SUSPEND
 387        case PM_EVENT_SUSPEND:
 388                return ops->suspend_late;
 389        case PM_EVENT_RESUME:
 390                return ops->resume_early;
 391#endif /* CONFIG_SUSPEND */
 392#ifdef CONFIG_HIBERNATE_CALLBACKS
 393        case PM_EVENT_FREEZE:
 394        case PM_EVENT_QUIESCE:
 395                return ops->freeze_late;
 396        case PM_EVENT_HIBERNATE:
 397                return ops->poweroff_late;
 398        case PM_EVENT_THAW:
 399        case PM_EVENT_RECOVER:
 400                return ops->thaw_early;
 401        case PM_EVENT_RESTORE:
 402                return ops->restore_early;
 403#endif /* CONFIG_HIBERNATE_CALLBACKS */
 404        }
 405
 406        return NULL;
 407}
 408
 409/**
 410 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 411 * @ops: PM operations to choose from.
 412 * @state: PM transition of the system being carried out.
 413 *
  414 * The driver of the target device will not receive interrupts while the
  415 * returned callback is being executed.
 416 */
 417static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 418{
 419        switch (state.event) {
 420#ifdef CONFIG_SUSPEND
 421        case PM_EVENT_SUSPEND:
 422                return ops->suspend_noirq;
 423        case PM_EVENT_RESUME:
 424                return ops->resume_noirq;
 425#endif /* CONFIG_SUSPEND */
 426#ifdef CONFIG_HIBERNATE_CALLBACKS
 427        case PM_EVENT_FREEZE:
 428        case PM_EVENT_QUIESCE:
 429                return ops->freeze_noirq;
 430        case PM_EVENT_HIBERNATE:
 431                return ops->poweroff_noirq;
 432        case PM_EVENT_THAW:
 433        case PM_EVENT_RECOVER:
 434                return ops->thaw_noirq;
 435        case PM_EVENT_RESTORE:
 436                return ops->restore_noirq;
 437#endif /* CONFIG_HIBERNATE_CALLBACKS */
 438        }
 439
 440        return NULL;
 441}
 442
 443static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
 444{
 445        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
 446                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
 447                ", may wakeup" : "");
 448}
 449
 450static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
 451                        int error)
 452{
 453        pr_err("Device %s failed to %s%s: error %d\n",
 454               dev_name(dev), pm_verb(state.event), info, error);
 455}
 456
 457static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
 458                          const char *info)
 459{
 460        ktime_t calltime;
 461        u64 usecs64;
 462        int usecs;
 463
 464        calltime = ktime_get();
 465        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
 466        do_div(usecs64, NSEC_PER_USEC);
 467        usecs = usecs64;
 468        if (usecs == 0)
 469                usecs = 1;
 470
 471        pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
 472                  info ?: "", info ? " " : "", pm_verb(state.event),
 473                  error ? "aborted" : "complete",
 474                  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 475}
 476
 477static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 478                            pm_message_t state, const char *info)
 479{
 480        ktime_t calltime;
 481        int error;
 482
 483        if (!cb)
 484                return 0;
 485
 486        calltime = initcall_debug_start(dev, cb);
 487
 488        pm_dev_dbg(dev, state, info);
 489        trace_device_pm_callback_start(dev, info, state.event);
 490        error = cb(dev);
 491        trace_device_pm_callback_end(dev, error);
 492        suspend_report_result(cb, error);
 493
 494        initcall_debug_report(dev, calltime, cb, error);
 495
 496        return error;
 497}
 498
 499#ifdef CONFIG_DPM_WATCHDOG
 500struct dpm_watchdog {
 501        struct device           *dev;
 502        struct task_struct      *tsk;
 503        struct timer_list       timer;
 504};
 505
 506#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
 507        struct dpm_watchdog wd
 508
 509/**
 510 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 511 * @t: The timer that PM watchdog depends on.
 512 *
 513 * Called when a driver has timed out suspending or resuming.
 514 * There's not much we can do here to recover so panic() to
 515 * capture a crash-dump in pstore.
 516 */
 517static void dpm_watchdog_handler(struct timer_list *t)
 518{
 519        struct dpm_watchdog *wd = from_timer(wd, t, timer);
 520
 521        dev_emerg(wd->dev, "**** DPM device timeout ****\n");
 522        show_stack(wd->tsk, NULL, KERN_EMERG);
 523        panic("%s %s: unrecoverable failure\n",
 524                dev_driver_string(wd->dev), dev_name(wd->dev));
 525}
 526
 527/**
 528 * dpm_watchdog_set - Enable pm watchdog for given device.
 529 * @wd: Watchdog. Must be allocated on the stack.
 530 * @dev: Device to handle.
 531 */
 532static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
 533{
 534        struct timer_list *timer = &wd->timer;
 535
 536        wd->dev = dev;
 537        wd->tsk = current;
 538
 539        timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
 540        /* use same timeout value for both suspend and resume */
 541        timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
 542        add_timer(timer);
 543}
 544
 545/**
 546 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 547 * @wd: Watchdog to disable.
 548 */
 549static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 550{
 551        struct timer_list *timer = &wd->timer;
 552
 553        del_timer_sync(timer);
 554        destroy_timer_on_stack(timer);
 555}
 556#else
 557#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
 558#define dpm_watchdog_set(x, y)
 559#define dpm_watchdog_clear(x)
 560#endif
 561
 562/*------------------------- Resume routines -------------------------*/
 563
 564/**
 565 * dev_pm_skip_resume - System-wide device resume optimization check.
 566 * @dev: Target device.
 567 *
 568 * Return:
 569 * - %false if the transition under way is RESTORE.
 570 * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
 571 * - The logical negation of %power.must_resume otherwise (that is, when the
 572 *   transition under way is RESUME).
 573 */
 574bool dev_pm_skip_resume(struct device *dev)
 575{
 576        if (pm_transition.event == PM_EVENT_RESTORE)
 577                return false;
 578
 579        if (pm_transition.event == PM_EVENT_THAW)
 580                return dev_pm_skip_suspend(dev);
 581
 582        return !dev->power.must_resume;
 583}
 584
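/*
 * Editorial illustration (not from the original file): dev_pm_skip_resume()
 * and power.must_resume only become interesting once a driver opts in to the
 * "leave runtime-suspended devices alone" optimizations.  A minimal sketch of
 * a hypothetical probe function doing that:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		// Ask the PM core to reuse the runtime-PM state during system
 *		// suspend and to skip resuming the device if it does not have
 *		// to be resumed.
 *		dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *					     DPM_FLAG_MAY_SKIP_RESUME);
 *		return 0;
 *	}
 */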
 585/**
 586 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 587 * @dev: Device to handle.
 588 * @state: PM transition of the system being carried out.
 589 * @async: If true, the device is being resumed asynchronously.
 590 *
 591 * The driver of @dev will not receive interrupts while this function is being
 592 * executed.
 593 */
 594static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 595{
 596        pm_callback_t callback = NULL;
 597        const char *info = NULL;
 598        bool skip_resume;
 599        int error = 0;
 600
 601        TRACE_DEVICE(dev);
 602        TRACE_RESUME(0);
 603
 604        if (dev->power.syscore || dev->power.direct_complete)
 605                goto Out;
 606
 607        if (!dev->power.is_noirq_suspended)
 608                goto Out;
 609
 610        if (!dpm_wait_for_superior(dev, async))
 611                goto Out;
 612
 613        skip_resume = dev_pm_skip_resume(dev);
 614        /*
 615         * If the driver callback is skipped below or by the middle layer
 616         * callback and device_resume_early() also skips the driver callback for
 617         * this device later, it needs to appear as "suspended" to PM-runtime,
 618         * so change its status accordingly.
 619         *
 620         * Otherwise, the device is going to be resumed, so set its PM-runtime
 621         * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
 622         * to avoid confusing drivers that don't use it.
 623         */
 624        if (skip_resume)
 625                pm_runtime_set_suspended(dev);
 626        else if (dev_pm_skip_suspend(dev))
 627                pm_runtime_set_active(dev);
 628
 629        if (dev->pm_domain) {
 630                info = "noirq power domain ";
 631                callback = pm_noirq_op(&dev->pm_domain->ops, state);
 632        } else if (dev->type && dev->type->pm) {
 633                info = "noirq type ";
 634                callback = pm_noirq_op(dev->type->pm, state);
 635        } else if (dev->class && dev->class->pm) {
 636                info = "noirq class ";
 637                callback = pm_noirq_op(dev->class->pm, state);
 638        } else if (dev->bus && dev->bus->pm) {
 639                info = "noirq bus ";
 640                callback = pm_noirq_op(dev->bus->pm, state);
 641        }
 642        if (callback)
 643                goto Run;
 644
 645        if (skip_resume)
 646                goto Skip;
 647
 648        if (dev->driver && dev->driver->pm) {
 649                info = "noirq driver ";
 650                callback = pm_noirq_op(dev->driver->pm, state);
 651        }
 652
 653Run:
 654        error = dpm_run_callback(callback, dev, state, info);
 655
 656Skip:
 657        dev->power.is_noirq_suspended = false;
 658
 659Out:
 660        complete_all(&dev->power.completion);
 661        TRACE_RESUME(error);
 662        return error;
 663}
 664
 665static bool is_async(struct device *dev)
 666{
 667        return dev->power.async_suspend && pm_async_enabled
 668                && !pm_trace_is_enabled();
 669}
 670
 671static bool dpm_async_fn(struct device *dev, async_func_t func)
 672{
 673        reinit_completion(&dev->power.completion);
 674
 675        if (is_async(dev)) {
 676                get_device(dev);
 677                async_schedule_dev(func, dev);
 678                return true;
 679        }
 680
 681        return false;
 682}
 683
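/*
 * Editorial illustration (not from the original file): dpm_async_fn() only
 * schedules a device on the async queue when power.async_suspend is set,
 * which a driver or subsystem usually requests at probe/registration time.
 * A minimal sketch (foo_probe() is hypothetical):
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		// Let this device suspend/resume in parallel with other
 *		// async-capable devices; ordering with its parent, children
 *		// and device links is still enforced via dpm_wait().
 *		device_enable_async_suspend(dev);
 *		return 0;
 *	}
 *
 * Writing 0 to /sys/power/pm_async (pm_async_enabled) disables this globally.
 */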
 684static void async_resume_noirq(void *data, async_cookie_t cookie)
 685{
 686        struct device *dev = (struct device *)data;
 687        int error;
 688
 689        error = device_resume_noirq(dev, pm_transition, true);
 690        if (error)
 691                pm_dev_err(dev, pm_transition, " async", error);
 692
 693        put_device(dev);
 694}
 695
 696static void dpm_noirq_resume_devices(pm_message_t state)
 697{
 698        struct device *dev;
 699        ktime_t starttime = ktime_get();
 700
 701        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
 702        mutex_lock(&dpm_list_mtx);
 703        pm_transition = state;
 704
 705        /*
 706         * Start the async threads upfront, in case their startup
 707         * would otherwise be delayed by devices that are resumed
 708         * synchronously.
 709         */
 710        list_for_each_entry(dev, &dpm_noirq_list, power.entry)
 711                dpm_async_fn(dev, async_resume_noirq);
 712
 713        while (!list_empty(&dpm_noirq_list)) {
 714                dev = to_device(dpm_noirq_list.next);
 715                get_device(dev);
 716                list_move_tail(&dev->power.entry, &dpm_late_early_list);
 717                mutex_unlock(&dpm_list_mtx);
 718
 719                if (!is_async(dev)) {
 720                        int error;
 721
 722                        error = device_resume_noirq(dev, state, false);
 723                        if (error) {
 724                                suspend_stats.failed_resume_noirq++;
 725                                dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 726                                dpm_save_failed_dev(dev_name(dev));
 727                                pm_dev_err(dev, state, " noirq", error);
 728                        }
 729                }
 730
 731                mutex_lock(&dpm_list_mtx);
 732                put_device(dev);
 733        }
 734        mutex_unlock(&dpm_list_mtx);
 735        async_synchronize_full();
 736        dpm_show_time(starttime, state, 0, "noirq");
 737        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
 738}
 739
 740/**
 741 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 742 * @state: PM transition of the system being carried out.
 743 *
 744 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 745 * allow device drivers' interrupt handlers to be called.
 746 */
 747void dpm_resume_noirq(pm_message_t state)
 748{
 749        dpm_noirq_resume_devices(state);
 750
 751        resume_device_irqs();
 752        device_wakeup_disarm_wake_irqs();
 753
 754        cpuidle_resume();
 755}
 756
 757/**
 758 * device_resume_early - Execute an "early resume" callback for given device.
 759 * @dev: Device to handle.
 760 * @state: PM transition of the system being carried out.
 761 * @async: If true, the device is being resumed asynchronously.
 762 *
 763 * Runtime PM is disabled for @dev while this function is being executed.
 764 */
 765static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 766{
 767        pm_callback_t callback = NULL;
 768        const char *info = NULL;
 769        int error = 0;
 770
 771        TRACE_DEVICE(dev);
 772        TRACE_RESUME(0);
 773
 774        if (dev->power.syscore || dev->power.direct_complete)
 775                goto Out;
 776
 777        if (!dev->power.is_late_suspended)
 778                goto Out;
 779
 780        if (!dpm_wait_for_superior(dev, async))
 781                goto Out;
 782
 783        if (dev->pm_domain) {
 784                info = "early power domain ";
 785                callback = pm_late_early_op(&dev->pm_domain->ops, state);
 786        } else if (dev->type && dev->type->pm) {
 787                info = "early type ";
 788                callback = pm_late_early_op(dev->type->pm, state);
 789        } else if (dev->class && dev->class->pm) {
 790                info = "early class ";
 791                callback = pm_late_early_op(dev->class->pm, state);
 792        } else if (dev->bus && dev->bus->pm) {
 793                info = "early bus ";
 794                callback = pm_late_early_op(dev->bus->pm, state);
 795        }
 796        if (callback)
 797                goto Run;
 798
 799        if (dev_pm_skip_resume(dev))
 800                goto Skip;
 801
 802        if (dev->driver && dev->driver->pm) {
 803                info = "early driver ";
 804                callback = pm_late_early_op(dev->driver->pm, state);
 805        }
 806
 807Run:
 808        error = dpm_run_callback(callback, dev, state, info);
 809
 810Skip:
 811        dev->power.is_late_suspended = false;
 812
 813Out:
 814        TRACE_RESUME(error);
 815
 816        pm_runtime_enable(dev);
 817        complete_all(&dev->power.completion);
 818        return error;
 819}
 820
 821static void async_resume_early(void *data, async_cookie_t cookie)
 822{
 823        struct device *dev = (struct device *)data;
 824        int error;
 825
 826        error = device_resume_early(dev, pm_transition, true);
 827        if (error)
 828                pm_dev_err(dev, pm_transition, " async", error);
 829
 830        put_device(dev);
 831}
 832
 833/**
 834 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 835 * @state: PM transition of the system being carried out.
 836 */
 837void dpm_resume_early(pm_message_t state)
 838{
 839        struct device *dev;
 840        ktime_t starttime = ktime_get();
 841
 842        trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
 843        mutex_lock(&dpm_list_mtx);
 844        pm_transition = state;
 845
 846        /*
 847         * Start the async threads upfront, in case their startup
 848         * would otherwise be delayed by devices that are resumed
 849         * synchronously.
 850         */
 851        list_for_each_entry(dev, &dpm_late_early_list, power.entry)
 852                dpm_async_fn(dev, async_resume_early);
 853
 854        while (!list_empty(&dpm_late_early_list)) {
 855                dev = to_device(dpm_late_early_list.next);
 856                get_device(dev);
 857                list_move_tail(&dev->power.entry, &dpm_suspended_list);
 858                mutex_unlock(&dpm_list_mtx);
 859
 860                if (!is_async(dev)) {
 861                        int error;
 862
 863                        error = device_resume_early(dev, state, false);
 864                        if (error) {
 865                                suspend_stats.failed_resume_early++;
 866                                dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 867                                dpm_save_failed_dev(dev_name(dev));
 868                                pm_dev_err(dev, state, " early", error);
 869                        }
 870                }
 871                mutex_lock(&dpm_list_mtx);
 872                put_device(dev);
 873        }
 874        mutex_unlock(&dpm_list_mtx);
 875        async_synchronize_full();
 876        dpm_show_time(starttime, state, 0, "early");
 877        trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
 878}
 879
 880/**
 881 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 882 * @state: PM transition of the system being carried out.
 883 */
 884void dpm_resume_start(pm_message_t state)
 885{
 886        dpm_resume_noirq(state);
 887        dpm_resume_early(state);
 888}
 889EXPORT_SYMBOL_GPL(dpm_resume_start);
 890
 891/**
 892 * device_resume - Execute "resume" callbacks for given device.
 893 * @dev: Device to handle.
 894 * @state: PM transition of the system being carried out.
 895 * @async: If true, the device is being resumed asynchronously.
 896 */
 897static int device_resume(struct device *dev, pm_message_t state, bool async)
 898{
 899        pm_callback_t callback = NULL;
 900        const char *info = NULL;
 901        int error = 0;
 902        DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 903
 904        TRACE_DEVICE(dev);
 905        TRACE_RESUME(0);
 906
 907        if (dev->power.syscore)
 908                goto Complete;
 909
 910        if (dev->power.direct_complete) {
 911                /* Match the pm_runtime_disable() in __device_suspend(). */
 912                pm_runtime_enable(dev);
 913                goto Complete;
 914        }
 915
 916        if (!dpm_wait_for_superior(dev, async))
 917                goto Complete;
 918
 919        dpm_watchdog_set(&wd, dev);
 920        device_lock(dev);
 921
 922        /*
 923         * This is a fib.  But we'll allow new children to be added below
 924         * a resumed device, even if the device hasn't been completed yet.
 925         */
 926        dev->power.is_prepared = false;
 927
 928        if (!dev->power.is_suspended)
 929                goto Unlock;
 930
 931        if (dev->pm_domain) {
 932                info = "power domain ";
 933                callback = pm_op(&dev->pm_domain->ops, state);
 934                goto Driver;
 935        }
 936
 937        if (dev->type && dev->type->pm) {
 938                info = "type ";
 939                callback = pm_op(dev->type->pm, state);
 940                goto Driver;
 941        }
 942
 943        if (dev->class && dev->class->pm) {
 944                info = "class ";
 945                callback = pm_op(dev->class->pm, state);
 946                goto Driver;
 947        }
 948
 949        if (dev->bus) {
 950                if (dev->bus->pm) {
 951                        info = "bus ";
 952                        callback = pm_op(dev->bus->pm, state);
 953                } else if (dev->bus->resume) {
 954                        info = "legacy bus ";
 955                        callback = dev->bus->resume;
 956                        goto End;
 957                }
 958        }
 959
 960 Driver:
 961        if (!callback && dev->driver && dev->driver->pm) {
 962                info = "driver ";
 963                callback = pm_op(dev->driver->pm, state);
 964        }
 965
 966 End:
 967        error = dpm_run_callback(callback, dev, state, info);
 968        dev->power.is_suspended = false;
 969
 970 Unlock:
 971        device_unlock(dev);
 972        dpm_watchdog_clear(&wd);
 973
 974 Complete:
 975        complete_all(&dev->power.completion);
 976
 977        TRACE_RESUME(error);
 978
 979        return error;
 980}
 981
 982static void async_resume(void *data, async_cookie_t cookie)
 983{
 984        struct device *dev = (struct device *)data;
 985        int error;
 986
 987        error = device_resume(dev, pm_transition, true);
 988        if (error)
 989                pm_dev_err(dev, pm_transition, " async", error);
 990        put_device(dev);
 991}
 992
 993/**
 994 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 995 * @state: PM transition of the system being carried out.
 996 *
 997 * Execute the appropriate "resume" callback for all devices whose status
 998 * indicates that they are suspended.
 999 */
1000void dpm_resume(pm_message_t state)
1001{
1002        struct device *dev;
1003        ktime_t starttime = ktime_get();
1004
1005        trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1006        might_sleep();
1007
1008        mutex_lock(&dpm_list_mtx);
1009        pm_transition = state;
1010        async_error = 0;
1011
1012        list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1013                dpm_async_fn(dev, async_resume);
1014
1015        while (!list_empty(&dpm_suspended_list)) {
1016                dev = to_device(dpm_suspended_list.next);
1017                get_device(dev);
1018                if (!is_async(dev)) {
1019                        int error;
1020
1021                        mutex_unlock(&dpm_list_mtx);
1022
1023                        error = device_resume(dev, state, false);
1024                        if (error) {
1025                                suspend_stats.failed_resume++;
1026                                dpm_save_failed_step(SUSPEND_RESUME);
1027                                dpm_save_failed_dev(dev_name(dev));
1028                                pm_dev_err(dev, state, "", error);
1029                        }
1030
1031                        mutex_lock(&dpm_list_mtx);
1032                }
1033                if (!list_empty(&dev->power.entry))
1034                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
1035                put_device(dev);
1036        }
1037        mutex_unlock(&dpm_list_mtx);
1038        async_synchronize_full();
1039        dpm_show_time(starttime, state, 0, NULL);
1040
1041        cpufreq_resume();
1042        devfreq_resume();
1043        trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1044}
1045
1046/**
1047 * device_complete - Complete a PM transition for given device.
1048 * @dev: Device to handle.
1049 * @state: PM transition of the system being carried out.
1050 */
1051static void device_complete(struct device *dev, pm_message_t state)
1052{
1053        void (*callback)(struct device *) = NULL;
1054        const char *info = NULL;
1055
1056        if (dev->power.syscore)
1057                return;
1058
1059        device_lock(dev);
1060
1061        if (dev->pm_domain) {
1062                info = "completing power domain ";
1063                callback = dev->pm_domain->ops.complete;
1064        } else if (dev->type && dev->type->pm) {
1065                info = "completing type ";
1066                callback = dev->type->pm->complete;
1067        } else if (dev->class && dev->class->pm) {
1068                info = "completing class ";
1069                callback = dev->class->pm->complete;
1070        } else if (dev->bus && dev->bus->pm) {
1071                info = "completing bus ";
1072                callback = dev->bus->pm->complete;
1073        }
1074
1075        if (!callback && dev->driver && dev->driver->pm) {
1076                info = "completing driver ";
1077                callback = dev->driver->pm->complete;
1078        }
1079
1080        if (callback) {
1081                pm_dev_dbg(dev, state, info);
1082                callback(dev);
1083        }
1084
1085        device_unlock(dev);
1086
1087        pm_runtime_put(dev);
1088}
1089
1090/**
1091 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1092 * @state: PM transition of the system being carried out.
1093 *
1094 * Execute the ->complete() callbacks for all devices whose PM status is not
1095 * DPM_ON (this allows new devices to be registered).
1096 */
1097void dpm_complete(pm_message_t state)
1098{
1099        struct list_head list;
1100
1101        trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1102        might_sleep();
1103
1104        INIT_LIST_HEAD(&list);
1105        mutex_lock(&dpm_list_mtx);
1106        while (!list_empty(&dpm_prepared_list)) {
1107                struct device *dev = to_device(dpm_prepared_list.prev);
1108
1109                get_device(dev);
1110                dev->power.is_prepared = false;
1111                list_move(&dev->power.entry, &list);
1112                mutex_unlock(&dpm_list_mtx);
1113
1114                trace_device_pm_callback_start(dev, "", state.event);
1115                device_complete(dev, state);
1116                trace_device_pm_callback_end(dev, 0);
1117
1118                mutex_lock(&dpm_list_mtx);
1119                put_device(dev);
1120        }
1121        list_splice(&list, &dpm_list);
1122        mutex_unlock(&dpm_list_mtx);
1123
1124        /* Allow device probing and trigger re-probing of deferred devices */
1125        device_unblock_probing();
1126        trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1127}
1128
1129/**
1130 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1131 * @state: PM transition of the system being carried out.
1132 *
1133 * Execute "resume" callbacks for all devices and complete the PM transition of
1134 * the system.
1135 */
1136void dpm_resume_end(pm_message_t state)
1137{
1138        dpm_resume(state);
1139        dpm_complete(state);
1140}
1141EXPORT_SYMBOL_GPL(dpm_resume_end);
1142
1143
1144/*------------------------- Suspend routines -------------------------*/
1145
1146/**
1147 * resume_event - Return a "resume" message for given "suspend" sleep state.
1148 * @sleep_state: PM message representing a sleep state.
1149 *
1150 * Return a PM message representing the resume event corresponding to given
1151 * sleep state.
1152 */
1153static pm_message_t resume_event(pm_message_t sleep_state)
1154{
1155        switch (sleep_state.event) {
1156        case PM_EVENT_SUSPEND:
1157                return PMSG_RESUME;
1158        case PM_EVENT_FREEZE:
1159        case PM_EVENT_QUIESCE:
1160                return PMSG_RECOVER;
1161        case PM_EVENT_HIBERNATE:
1162                return PMSG_RESTORE;
1163        }
1164        return PMSG_ON;
1165}
1166
1167static void dpm_superior_set_must_resume(struct device *dev)
1168{
1169        struct device_link *link;
1170        int idx;
1171
1172        if (dev->parent)
1173                dev->parent->power.must_resume = true;
1174
1175        idx = device_links_read_lock();
1176
1177        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1178                link->supplier->power.must_resume = true;
1179
1180        device_links_read_unlock(idx);
1181}
1182
1183/**
1184 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1185 * @dev: Device to handle.
1186 * @state: PM transition of the system being carried out.
1187 * @async: If true, the device is being suspended asynchronously.
1188 *
1189 * The driver of @dev will not receive interrupts while this function is being
1190 * executed.
1191 */
1192static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1193{
1194        pm_callback_t callback = NULL;
1195        const char *info = NULL;
1196        int error = 0;
1197
1198        TRACE_DEVICE(dev);
1199        TRACE_SUSPEND(0);
1200
1201        dpm_wait_for_subordinate(dev, async);
1202
1203        if (async_error)
1204                goto Complete;
1205
1206        if (dev->power.syscore || dev->power.direct_complete)
1207                goto Complete;
1208
1209        if (dev->pm_domain) {
1210                info = "noirq power domain ";
1211                callback = pm_noirq_op(&dev->pm_domain->ops, state);
1212        } else if (dev->type && dev->type->pm) {
1213                info = "noirq type ";
1214                callback = pm_noirq_op(dev->type->pm, state);
1215        } else if (dev->class && dev->class->pm) {
1216                info = "noirq class ";
1217                callback = pm_noirq_op(dev->class->pm, state);
1218        } else if (dev->bus && dev->bus->pm) {
1219                info = "noirq bus ";
1220                callback = pm_noirq_op(dev->bus->pm, state);
1221        }
1222        if (callback)
1223                goto Run;
1224
1225        if (dev_pm_skip_suspend(dev))
1226                goto Skip;
1227
1228        if (dev->driver && dev->driver->pm) {
1229                info = "noirq driver ";
1230                callback = pm_noirq_op(dev->driver->pm, state);
1231        }
1232
1233Run:
1234        error = dpm_run_callback(callback, dev, state, info);
1235        if (error) {
1236                async_error = error;
1237                goto Complete;
1238        }
1239
1240Skip:
1241        dev->power.is_noirq_suspended = true;
1242
1243        /*
1244         * Skipping the resume of devices that were in use right before the
1245         * system suspend (as indicated by their PM-runtime usage counters)
1246         * would be suboptimal.  Also resume them if skipping their resume is
1247         * not allowed.
1248         */
1249        if (atomic_read(&dev->power.usage_count) > 1 ||
1250            !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1251              dev->power.may_skip_resume))
1252                dev->power.must_resume = true;
1253
1254        if (dev->power.must_resume)
1255                dpm_superior_set_must_resume(dev);
1256
1257Complete:
1258        complete_all(&dev->power.completion);
1259        TRACE_SUSPEND(error);
1260        return error;
1261}
1262
1263static void async_suspend_noirq(void *data, async_cookie_t cookie)
1264{
1265        struct device *dev = (struct device *)data;
1266        int error;
1267
1268        error = __device_suspend_noirq(dev, pm_transition, true);
1269        if (error) {
1270                dpm_save_failed_dev(dev_name(dev));
1271                pm_dev_err(dev, pm_transition, " async", error);
1272        }
1273
1274        put_device(dev);
1275}
1276
1277static int device_suspend_noirq(struct device *dev)
1278{
1279        if (dpm_async_fn(dev, async_suspend_noirq))
1280                return 0;
1281
1282        return __device_suspend_noirq(dev, pm_transition, false);
1283}
1284
1285static int dpm_noirq_suspend_devices(pm_message_t state)
1286{
1287        ktime_t starttime = ktime_get();
1288        int error = 0;
1289
1290        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1291        mutex_lock(&dpm_list_mtx);
1292        pm_transition = state;
1293        async_error = 0;
1294
1295        while (!list_empty(&dpm_late_early_list)) {
1296                struct device *dev = to_device(dpm_late_early_list.prev);
1297
1298                get_device(dev);
1299                mutex_unlock(&dpm_list_mtx);
1300
1301                error = device_suspend_noirq(dev);
1302
1303                mutex_lock(&dpm_list_mtx);
1304                if (error) {
1305                        pm_dev_err(dev, state, " noirq", error);
1306                        dpm_save_failed_dev(dev_name(dev));
1307                        put_device(dev);
1308                        break;
1309                }
1310                if (!list_empty(&dev->power.entry))
1311                        list_move(&dev->power.entry, &dpm_noirq_list);
1312                put_device(dev);
1313
1314                if (async_error)
1315                        break;
1316        }
1317        mutex_unlock(&dpm_list_mtx);
1318        async_synchronize_full();
1319        if (!error)
1320                error = async_error;
1321
1322        if (error) {
1323                suspend_stats.failed_suspend_noirq++;
1324                dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1325        }
1326        dpm_show_time(starttime, state, error, "noirq");
1327        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1328        return error;
1329}
1330
1331/**
1332 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1333 * @state: PM transition of the system being carried out.
1334 *
1335 * Prevent device drivers' interrupt handlers from being called and invoke
1336 * "noirq" suspend callbacks for all non-sysdev devices.
1337 */
1338int dpm_suspend_noirq(pm_message_t state)
1339{
1340        int ret;
1341
1342        cpuidle_pause();
1343
1344        device_wakeup_arm_wake_irqs();
1345        suspend_device_irqs();
1346
1347        ret = dpm_noirq_suspend_devices(state);
1348        if (ret)
1349                dpm_resume_noirq(resume_event(state));
1350
1351        return ret;
1352}
1353
1354static void dpm_propagate_wakeup_to_parent(struct device *dev)
1355{
1356        struct device *parent = dev->parent;
1357
1358        if (!parent)
1359                return;
1360
1361        spin_lock_irq(&parent->power.lock);
1362
1363        if (dev->power.wakeup_path && !parent->power.ignore_children)
1364                parent->power.wakeup_path = true;
1365
1366        spin_unlock_irq(&parent->power.lock);
1367}
1368
1369/**
1370 * __device_suspend_late - Execute a "late suspend" callback for given device.
1371 * @dev: Device to handle.
1372 * @state: PM transition of the system being carried out.
1373 * @async: If true, the device is being suspended asynchronously.
1374 *
1375 * Runtime PM is disabled for @dev while this function is being executed.
1376 */
1377static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1378{
1379        pm_callback_t callback = NULL;
1380        const char *info = NULL;
1381        int error = 0;
1382
1383        TRACE_DEVICE(dev);
1384        TRACE_SUSPEND(0);
1385
1386        __pm_runtime_disable(dev, false);
1387
1388        dpm_wait_for_subordinate(dev, async);
1389
1390        if (async_error)
1391                goto Complete;
1392
1393        if (pm_wakeup_pending()) {
1394                async_error = -EBUSY;
1395                goto Complete;
1396        }
1397
1398        if (dev->power.syscore || dev->power.direct_complete)
1399                goto Complete;
1400
1401        if (dev->pm_domain) {
1402                info = "late power domain ";
1403                callback = pm_late_early_op(&dev->pm_domain->ops, state);
1404        } else if (dev->type && dev->type->pm) {
1405                info = "late type ";
1406                callback = pm_late_early_op(dev->type->pm, state);
1407        } else if (dev->class && dev->class->pm) {
1408                info = "late class ";
1409                callback = pm_late_early_op(dev->class->pm, state);
1410        } else if (dev->bus && dev->bus->pm) {
1411                info = "late bus ";
1412                callback = pm_late_early_op(dev->bus->pm, state);
1413        }
1414        if (callback)
1415                goto Run;
1416
1417        if (dev_pm_skip_suspend(dev))
1418                goto Skip;
1419
1420        if (dev->driver && dev->driver->pm) {
1421                info = "late driver ";
1422                callback = pm_late_early_op(dev->driver->pm, state);
1423        }
1424
1425Run:
1426        error = dpm_run_callback(callback, dev, state, info);
1427        if (error) {
1428                async_error = error;
1429                goto Complete;
1430        }
1431        dpm_propagate_wakeup_to_parent(dev);
1432
1433Skip:
1434        dev->power.is_late_suspended = true;
1435
1436Complete:
1437        TRACE_SUSPEND(error);
1438        complete_all(&dev->power.completion);
1439        return error;
1440}
1441
1442static void async_suspend_late(void *data, async_cookie_t cookie)
1443{
1444        struct device *dev = (struct device *)data;
1445        int error;
1446
1447        error = __device_suspend_late(dev, pm_transition, true);
1448        if (error) {
1449                dpm_save_failed_dev(dev_name(dev));
1450                pm_dev_err(dev, pm_transition, " async", error);
1451        }
1452        put_device(dev);
1453}
1454
1455static int device_suspend_late(struct device *dev)
1456{
1457        if (dpm_async_fn(dev, async_suspend_late))
1458                return 0;
1459
1460        return __device_suspend_late(dev, pm_transition, false);
1461}
1462
1463/**
1464 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1465 * @state: PM transition of the system being carried out.
1466 */
1467int dpm_suspend_late(pm_message_t state)
1468{
1469        ktime_t starttime = ktime_get();
1470        int error = 0;
1471
1472        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1473        mutex_lock(&dpm_list_mtx);
1474        pm_transition = state;
1475        async_error = 0;
1476
1477        while (!list_empty(&dpm_suspended_list)) {
1478                struct device *dev = to_device(dpm_suspended_list.prev);
1479
1480                get_device(dev);
1481                mutex_unlock(&dpm_list_mtx);
1482
1483                error = device_suspend_late(dev);
1484
1485                mutex_lock(&dpm_list_mtx);
1486                if (!list_empty(&dev->power.entry))
1487                        list_move(&dev->power.entry, &dpm_late_early_list);
1488
1489                if (error) {
1490                        pm_dev_err(dev, state, " late", error);
1491                        dpm_save_failed_dev(dev_name(dev));
1492                        put_device(dev);
1493                        break;
1494                }
1495                put_device(dev);
1496
1497                if (async_error)
1498                        break;
1499        }
1500        mutex_unlock(&dpm_list_mtx);
1501        async_synchronize_full();
1502        if (!error)
1503                error = async_error;
1504        if (error) {
1505                suspend_stats.failed_suspend_late++;
1506                dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1507                dpm_resume_early(resume_event(state));
1508        }
1509        dpm_show_time(starttime, state, error, "late");
1510        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1511        return error;
1512}
1513
1514/**
1515 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1516 * @state: PM transition of the system being carried out.
1517 */
1518int dpm_suspend_end(pm_message_t state)
1519{
1520        ktime_t starttime = ktime_get();
1521        int error;
1522
1523        error = dpm_suspend_late(state);
1524        if (error)
1525                goto out;
1526
1527        error = dpm_suspend_noirq(state);
1528        if (error)
1529                dpm_resume_early(resume_event(state));
1530
1531out:
1532        dpm_show_time(starttime, state, error, "end");
1533        return error;
1534}
1535EXPORT_SYMBOL_GPL(dpm_suspend_end);
1536
1537/**
1538 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1539 * @dev: Device to suspend.
1540 * @state: PM transition of the system being carried out.
1541 * @cb: Suspend callback to execute.
1542 * @info: string description of caller.
1543 */
1544static int legacy_suspend(struct device *dev, pm_message_t state,
1545                          int (*cb)(struct device *dev, pm_message_t state),
1546                          const char *info)
1547{
1548        int error;
1549        ktime_t calltime;
1550
1551        calltime = initcall_debug_start(dev, cb);
1552
1553        trace_device_pm_callback_start(dev, info, state.event);
1554        error = cb(dev, state);
1555        trace_device_pm_callback_end(dev, error);
1556        suspend_report_result(cb, error);
1557
1558        initcall_debug_report(dev, calltime, cb, error);
1559
1560        return error;
1561}
1562
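/*
 * Editorial illustration (not from the original file): the legacy callbacks
 * run through legacy_suspend() still take the pm_message_t directly, unlike
 * dev_pm_ops callbacks.  A hypothetical bus using the legacy interface could
 * look like this (all foo_* names are made up):
 *
 *	static int foo_bus_suspend(struct device *dev, pm_message_t state)
 *	{
 *		dev_dbg(dev, "legacy suspend, event %d\n", state.event);
 *		return 0;
 *	}
 *
 *	static struct bus_type foo_bus_type = {
 *		.name    = "foo",
 *		.suspend = foo_bus_suspend,
 *	};
 */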
1563static void dpm_clear_superiors_direct_complete(struct device *dev)
1564{
1565        struct device_link *link;
1566        int idx;
1567
1568        if (dev->parent) {
1569                spin_lock_irq(&dev->parent->power.lock);
1570                dev->parent->power.direct_complete = false;
1571                spin_unlock_irq(&dev->parent->power.lock);
1572        }
1573
1574        idx = device_links_read_lock();
1575
1576        list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1577                spin_lock_irq(&link->supplier->power.lock);
1578                link->supplier->power.direct_complete = false;
1579                spin_unlock_irq(&link->supplier->power.lock);
1580        }
1581
1582        device_links_read_unlock(idx);
1583}
1584
1585/**
1586 * __device_suspend - Execute "suspend" callbacks for given device.
1587 * @dev: Device to handle.
1588 * @state: PM transition of the system being carried out.
1589 * @async: If true, the device is being suspended asynchronously.
1590 */
1591static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1592{
1593        pm_callback_t callback = NULL;
1594        const char *info = NULL;
1595        int error = 0;
1596        DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1597
1598        TRACE_DEVICE(dev);
1599        TRACE_SUSPEND(0);
1600
1601        dpm_wait_for_subordinate(dev, async);
1602
1603        if (async_error) {
1604                dev->power.direct_complete = false;
1605                goto Complete;
1606        }
1607
1608        /*
1609         * If a device configured to wake up the system from sleep states
1610         * has been suspended at run time and there's a resume request pending
1611         * for it, this is equivalent to the device signaling wakeup, so the
1612         * system suspend operation should be aborted.
1613         */
1614        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1615                pm_wakeup_event(dev, 0);
1616
1617        if (pm_wakeup_pending()) {
1618                dev->power.direct_complete = false;
1619                async_error = -EBUSY;
1620                goto Complete;
1621        }
1622
1623        if (dev->power.syscore)
1624                goto Complete;
1625
1626        /* Avoid direct_complete to let wakeup_path propagate. */
1627        if (device_may_wakeup(dev) || dev->power.wakeup_path)
1628                dev->power.direct_complete = false;
1629
1630        if (dev->power.direct_complete) {
1631                if (pm_runtime_status_suspended(dev)) {
1632                        pm_runtime_disable(dev);
1633                        if (pm_runtime_status_suspended(dev)) {
1634                                pm_dev_dbg(dev, state, "direct-complete ");
1635                                goto Complete;
1636                        }
1637
1638                        pm_runtime_enable(dev);
1639                }
1640                dev->power.direct_complete = false;
1641        }
1642
1643        dev->power.may_skip_resume = true;
1644        dev->power.must_resume = false;
1645
1646        dpm_watchdog_set(&wd, dev);
1647        device_lock(dev);
1648
1649        if (dev->pm_domain) {
1650                info = "power domain ";
1651                callback = pm_op(&dev->pm_domain->ops, state);
1652                goto Run;
1653        }
1654
1655        if (dev->type && dev->type->pm) {
1656                info = "type ";
1657                callback = pm_op(dev->type->pm, state);
1658                goto Run;
1659        }
1660
1661        if (dev->class && dev->class->pm) {
1662                info = "class ";
1663                callback = pm_op(dev->class->pm, state);
1664                goto Run;
1665        }
1666
1667        if (dev->bus) {
1668                if (dev->bus->pm) {
1669                        info = "bus ";
1670                        callback = pm_op(dev->bus->pm, state);
1671                } else if (dev->bus->suspend) {
1672                        pm_dev_dbg(dev, state, "legacy bus ");
1673                        error = legacy_suspend(dev, state, dev->bus->suspend,
1674                                                "legacy bus ");
1675                        goto End;
1676                }
1677        }
1678
1679 Run:
1680        if (!callback && dev->driver && dev->driver->pm) {
1681                info = "driver ";
1682                callback = pm_op(dev->driver->pm, state);
1683        }
1684
1685        error = dpm_run_callback(callback, dev, state, info);
1686
1687 End:
1688        if (!error) {
1689                dev->power.is_suspended = true;
1690                if (device_may_wakeup(dev))
1691                        dev->power.wakeup_path = true;
1692
1693                dpm_propagate_wakeup_to_parent(dev);
1694                dpm_clear_superiors_direct_complete(dev);
1695        }
1696
1697        device_unlock(dev);
1698        dpm_watchdog_clear(&wd);
1699
1700 Complete:
1701        if (error)
1702                async_error = error;
1703
1704        complete_all(&dev->power.completion);
1705        TRACE_SUSPEND(error);
1706        return error;
1707}
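/*
 * Illustrative sketch only (not part of this file): when no power domain,
 * type, class or bus callback is found above, __device_suspend() falls back
 * to the driver's dev_pm_ops.  A hypothetical driver providing system sleep
 * callbacks might look roughly like this (all names below are made up):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		// Quiesce the hardware and save any context here.
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		// Restore context and re-enable the hardware here.
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 */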
1708
1709static void async_suspend(void *data, async_cookie_t cookie)
1710{
1711        struct device *dev = (struct device *)data;
1712        int error;
1713
1714        error = __device_suspend(dev, pm_transition, true);
1715        if (error) {
1716                dpm_save_failed_dev(dev_name(dev));
1717                pm_dev_err(dev, pm_transition, " async", error);
1718        }
1719
1720        put_device(dev);
1721}
1722
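/*
 * device_suspend - Suspend a device, asynchronously if asynchronous suspend
 * is allowed for it and an async thread can be scheduled, or synchronously
 * in the caller's context otherwise.
 */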
1723static int device_suspend(struct device *dev)
1724{
1725        if (dpm_async_fn(dev, async_suspend))
1726                return 0;
1727
1728        return __device_suspend(dev, pm_transition, false);
1729}
1730
1731/**
1732 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1733 * @state: PM transition of the system being carried out.
1734 */
1735int dpm_suspend(pm_message_t state)
1736{
1737        ktime_t starttime = ktime_get();
1738        int error = 0;
1739
1740        trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1741        might_sleep();
1742
1743        devfreq_suspend();
1744        cpufreq_suspend();
1745
1746        mutex_lock(&dpm_list_mtx);
1747        pm_transition = state;
1748        async_error = 0;
1749        while (!list_empty(&dpm_prepared_list)) {
1750                struct device *dev = to_device(dpm_prepared_list.prev);
1751
1752                get_device(dev);
1753                mutex_unlock(&dpm_list_mtx);
1754
1755                error = device_suspend(dev);
1756
1757                mutex_lock(&dpm_list_mtx);
1758                if (error) {
1759                        pm_dev_err(dev, state, "", error);
1760                        dpm_save_failed_dev(dev_name(dev));
1761                        put_device(dev);
1762                        break;
1763                }
1764                if (!list_empty(&dev->power.entry))
1765                        list_move(&dev->power.entry, &dpm_suspended_list);
1766                put_device(dev);
1767                if (async_error)
1768                        break;
1769        }
1770        mutex_unlock(&dpm_list_mtx);
1771        async_synchronize_full();
1772        if (!error)
1773                error = async_error;
1774        if (error) {
1775                suspend_stats.failed_suspend++;
1776                dpm_save_failed_step(SUSPEND_SUSPEND);
1777        }
1778        dpm_show_time(starttime, state, error, NULL);
1779        trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1780        return error;
1781}
1782
1783/**
1784 * device_prepare - Prepare a device for system power transition.
1785 * @dev: Device to handle.
1786 * @state: PM transition of the system being carried out.
1787 *
1788 * Execute the ->prepare() callback(s) for the given device.  No new children of the
1789 * device may be registered after this function has returned.
1790 */
1791static int device_prepare(struct device *dev, pm_message_t state)
1792{
1793        int (*callback)(struct device *) = NULL;
1794        int ret = 0;
1795
1796        if (dev->power.syscore)
1797                return 0;
1798
1799        /*
1800         * If a device's parent goes into runtime suspend at the wrong time,
1801         * it won't be possible to resume the device.  To prevent this we
1802         * block runtime suspend here, during the prepare phase, and allow
1803         * it again during the complete phase.
1804         */
1805        pm_runtime_get_noresume(dev);
1806
1807        device_lock(dev);
1808
1809        dev->power.wakeup_path = false;
1810
1811        if (dev->power.no_pm_callbacks)
1812                goto unlock;
1813
1814        if (dev->pm_domain)
1815                callback = dev->pm_domain->ops.prepare;
1816        else if (dev->type && dev->type->pm)
1817                callback = dev->type->pm->prepare;
1818        else if (dev->class && dev->class->pm)
1819                callback = dev->class->pm->prepare;
1820        else if (dev->bus && dev->bus->pm)
1821                callback = dev->bus->pm->prepare;
1822
1823        if (!callback && dev->driver && dev->driver->pm)
1824                callback = dev->driver->pm->prepare;
1825
1826        if (callback)
1827                ret = callback(dev);
1828
1829unlock:
1830        device_unlock(dev);
1831
1832        if (ret < 0) {
1833                suspend_report_result(callback, ret);
1834                pm_runtime_put(dev);
1835                return ret;
1836        }
1837        /*
1838         * A positive return value from ->prepare() means "this device appears
1839         * to be runtime-suspended and its state is fine, so if it really is
1840         * runtime-suspended, you can leave it in that state provided that you
1841         * will do the same thing with all of its descendants".  This only
1842         * applies to suspend transitions, however.
1843         */
1844        spin_lock_irq(&dev->power.lock);
1845        dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1846                (ret > 0 || dev->power.no_pm_callbacks) &&
1847                !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1848        spin_unlock_irq(&dev->power.lock);
1849        return 0;
1850}
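/*
 * Illustrative sketch only (not part of this file): during suspend
 * transitions, a positive return value from ->prepare() (together with
 * DPM_FLAG_NO_DIRECT_COMPLETE being unset) lets device_prepare() above set
 * power.direct_complete, so the device may be left runtime-suspended.  The
 * name below is hypothetical:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		// Nothing to do; the device may stay runtime-suspended.
 *		return 1;
 *	}
 */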
1851
1852/**
1853 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1854 * @state: PM transition of the system being carried out.
1855 *
1856 * Execute the ->prepare() callback(s) for all devices.
1857 */
1858int dpm_prepare(pm_message_t state)
1859{
1860        int error = 0;
1861
1862        trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1863        might_sleep();
1864
1865        /*
1866         * Give the known devices a chance to complete their probes before
1867         * probing is disabled below. This sync point is important at least
1868         * at boot time and during hibernation restore.
1869         */
1870        wait_for_device_probe();
1871        /*
1872         * Probing devices during suspend or hibernation is unsafe and would
1873         * make system behavior unpredictable, so prohibit device probing here
1874         * and defer any probes until later. The normal behavior will be
1875         * restored in dpm_complete().
1876         */
1877        device_block_probing();
1878
1879        mutex_lock(&dpm_list_mtx);
1880        while (!list_empty(&dpm_list)) {
1881                struct device *dev = to_device(dpm_list.next);
1882
1883                get_device(dev);
1884                mutex_unlock(&dpm_list_mtx);
1885
1886                trace_device_pm_callback_start(dev, "", state.event);
1887                error = device_prepare(dev, state);
1888                trace_device_pm_callback_end(dev, error);
1889
1890                mutex_lock(&dpm_list_mtx);
1891                if (error) {
1892                        if (error == -EAGAIN) {
1893                                put_device(dev);
1894                                error = 0;
1895                                continue;
1896                        }
1897                        pr_info("Device %s not prepared for power transition: code %d\n",
1898                                dev_name(dev), error);
1899                        put_device(dev);
1900                        break;
1901                }
1902                dev->power.is_prepared = true;
1903                if (!list_empty(&dev->power.entry))
1904                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
1905                put_device(dev);
1906        }
1907        mutex_unlock(&dpm_list_mtx);
1908        trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1909        return error;
1910}
1911
1912/**
1913 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1914 * @state: PM transition of the system being carried out.
1915 *
1916 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1917 * callbacks for them.
1918 */
1919int dpm_suspend_start(pm_message_t state)
1920{
1921        ktime_t starttime = ktime_get();
1922        int error;
1923
1924        error = dpm_prepare(state);
1925        if (error) {
1926                suspend_stats.failed_prepare++;
1927                dpm_save_failed_step(SUSPEND_PREPARE);
1928        } else
1929                error = dpm_suspend(state);
1930        dpm_show_time(starttime, state, error, "start");
1931        return error;
1932}
1933EXPORT_SYMBOL_GPL(dpm_suspend_start);
1934
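/*
 * __suspend_report_result - Report a failing result from a suspend callback.
 *
 * Log an error if @ret is non-zero.  This is normally invoked through the
 * suspend_report_result() macro, which supplies the caller's __func__ as
 * @function along with the callback pointer @fn and its return value @ret.
 */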
1935void __suspend_report_result(const char *function, void *fn, int ret)
1936{
1937        if (ret)
1938                pr_err("%s(): %pS returns %d\n", function, fn, ret);
1939}
1940EXPORT_SYMBOL_GPL(__suspend_report_result);
1941
1942/**
1943 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1944 * @subordinate: Device that needs to wait for @dev.
1945 * @dev: Device to wait for.
1946 */
1947int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1948{
1949        dpm_wait(dev, subordinate->power.async_suspend);
1950        return async_error;
1951}
1952EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
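/*
 * Illustrative sketch only (not part of this file): a driver whose device
 * must not proceed before some other device has finished its own transition
 * can call device_pm_wait_for_dev() from its callback.  The names foo_suspend,
 * foo_data and companion below are hypothetical:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_data *fd = dev_get_drvdata(dev);
 *
 *		// Wait until the companion device has completed its transition.
 *		return device_pm_wait_for_dev(dev, fd->companion);
 *	}
 */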
1953
1954/**
1955 * dpm_for_each_dev - device iterator.
1956 * @data: data for the callback.
1957 * @fn: function to be called for each device.
1958 *
1959 * Iterate over devices in dpm_list, and call @fn for each device,
1960 * passing it @data.
1961 */
1962void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1963{
1964        struct device *dev;
1965
1966        if (!fn)
1967                return;
1968
1969        device_pm_lock();
1970        list_for_each_entry(dev, &dpm_list, power.entry)
1971                fn(dev, data);
1972        device_pm_unlock();
1973}
1974EXPORT_SYMBOL_GPL(dpm_for_each_dev);
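/*
 * Illustrative sketch only (not part of this file): dpm_for_each_dev() runs
 * the callback for every device on dpm_list with the PM core lock held.  A
 * hypothetical caller counting devices could do:
 *
 *	static void count_one(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	dpm_for_each_dev(&count, count_one);
 */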
1975
1976static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1977{
1978        if (!ops)
1979                return true;
1980
1981        return !ops->prepare &&
1982               !ops->suspend &&
1983               !ops->suspend_late &&
1984               !ops->suspend_noirq &&
1985               !ops->resume_noirq &&
1986               !ops->resume_early &&
1987               !ops->resume &&
1988               !ops->complete;
1989}
1990
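/*
 * device_pm_check_callbacks - Recompute the no_pm_callbacks flag for a device.
 *
 * Set dev->power.no_pm_callbacks if neither the device's bus, class, type,
 * PM domain nor driver provides any system sleep callbacks (including the
 * legacy bus and driver suspend/resume hooks), which allows the PM core to
 * take shortcuts for the device during system transitions.
 */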
1991void device_pm_check_callbacks(struct device *dev)
1992{
1993        spin_lock_irq(&dev->power.lock);
1994        dev->power.no_pm_callbacks =
1995                (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
1996                 !dev->bus->suspend && !dev->bus->resume)) &&
1997                (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
1998                (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
1999                (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2000                (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2001                 !dev->driver->suspend && !dev->driver->resume));
2002        spin_unlock_irq(&dev->power.lock);
2003}
2004
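/**
 * dev_pm_skip_suspend - Check whether a device's suspend callbacks may be skipped.
 * @dev: Device to check.
 *
 * Return true if the device's driver has set DPM_FLAG_SMART_SUSPEND and the
 * device is currently runtime-suspended, which indicates that its system-wide
 * suspend callbacks may be skipped.
 */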
2005bool dev_pm_skip_suspend(struct device *dev)
2006{
2007        return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2008                pm_runtime_status_suspended(dev);
2009}
2010