linux/drivers/base/power/main.c
   1/*
   2 * drivers/base/power/main.c - Where the driver meets power management.
   3 *
   4 * Copyright (c) 2003 Patrick Mochel
   5 * Copyright (c) 2003 Open Source Development Lab
   6 *
   7 * This file is released under the GPLv2
   8 *
   9 *
  10 * The driver model core calls device_pm_add() when a device is registered.
  11 * This will initialize the embedded device_pm_info object in the device
  12 * and add it to the list of power-controlled devices. sysfs entries for
  13 * controlling device power management will also be added.
  14 *
  15 * A separate list is used for keeping track of power info, because the power
  16 * domain dependencies may differ from the ancestral dependencies that the
  17 * subsystem list maintains.
  18 */
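     /*
      * The routines below implement the system sleep phases in this order:
      * prepare, suspend, suspend_late and suspend_noirq on the way down, and
      * resume_noirq, resume_early, resume and complete on the way back up.
      * Drivers normally hook into these phases through struct dev_pm_ops; a
      * minimal illustrative sketch, where foo_suspend() and foo_resume() are
      * hypothetical driver callbacks:
      *
      *     static const struct dev_pm_ops foo_pm_ops = {
      *             SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
      *     };
      */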
  19
  20#include <linux/device.h>
  21#include <linux/kallsyms.h>
  22#include <linux/export.h>
  23#include <linux/mutex.h>
  24#include <linux/pm.h>
  25#include <linux/pm_runtime.h>
  26#include <linux/resume-trace.h>
  27#include <linux/interrupt.h>
  28#include <linux/sched.h>
  29#include <linux/async.h>
  30#include <linux/suspend.h>
  31#include <trace/events/power.h>
  32#include <linux/cpufreq.h>
  33#include <linux/cpuidle.h>
  34#include <linux/timer.h>
  35
  36#include "../base.h"
  37#include "power.h"
  38
  39typedef int (*pm_callback_t)(struct device *);
  40
  41/*
   42 * The entries in dpm_list are kept in depth-first order, simply
  43 * because children are guaranteed to be discovered after parents, and
  44 * are inserted at the back of the list on discovery.
  45 *
  46 * Since device_pm_add() may be called with a device lock held,
  47 * we must never try to acquire a device lock while holding
  48 * dpm_list_mutex.
  49 */
  50
  51LIST_HEAD(dpm_list);
  52static LIST_HEAD(dpm_prepared_list);
  53static LIST_HEAD(dpm_suspended_list);
  54static LIST_HEAD(dpm_late_early_list);
  55static LIST_HEAD(dpm_noirq_list);
  56
  57struct suspend_stats suspend_stats;
  58static DEFINE_MUTEX(dpm_list_mtx);
  59static pm_message_t pm_transition;
  60
  61static int async_error;
  62
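     /* Map a PM_EVENT_* code to a human-readable verb for log messages. */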
  63static char *pm_verb(int event)
  64{
  65        switch (event) {
  66        case PM_EVENT_SUSPEND:
  67                return "suspend";
  68        case PM_EVENT_RESUME:
  69                return "resume";
  70        case PM_EVENT_FREEZE:
  71                return "freeze";
  72        case PM_EVENT_QUIESCE:
  73                return "quiesce";
  74        case PM_EVENT_HIBERNATE:
  75                return "hibernate";
  76        case PM_EVENT_THAW:
  77                return "thaw";
  78        case PM_EVENT_RESTORE:
  79                return "restore";
  80        case PM_EVENT_RECOVER:
  81                return "recover";
  82        default:
  83                return "(unknown PM event)";
  84        }
  85}
  86
  87/**
  88 * device_pm_sleep_init - Initialize system suspend-related device fields.
  89 * @dev: Device object being initialized.
  90 */
  91void device_pm_sleep_init(struct device *dev)
  92{
  93        dev->power.is_prepared = false;
  94        dev->power.is_suspended = false;
  95        dev->power.is_noirq_suspended = false;
  96        dev->power.is_late_suspended = false;
  97        init_completion(&dev->power.completion);
  98        complete_all(&dev->power.completion);
  99        dev->power.wakeup = NULL;
 100        INIT_LIST_HEAD(&dev->power.entry);
 101}
 102
 103/**
 104 * device_pm_lock - Lock the list of active devices used by the PM core.
 105 */
 106void device_pm_lock(void)
 107{
 108        mutex_lock(&dpm_list_mtx);
 109}
 110
 111/**
 112 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 113 */
 114void device_pm_unlock(void)
 115{
 116        mutex_unlock(&dpm_list_mtx);
 117}
 118
 119/**
 120 * device_pm_add - Add a device to the PM core's list of active devices.
 121 * @dev: Device to add to the list.
 122 */
 123void device_pm_add(struct device *dev)
 124{
 125        pr_debug("PM: Adding info for %s:%s\n",
 126                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 127        mutex_lock(&dpm_list_mtx);
 128        if (dev->parent && dev->parent->power.is_prepared)
 129                dev_warn(dev, "parent %s should not be sleeping\n",
 130                        dev_name(dev->parent));
 131        list_add_tail(&dev->power.entry, &dpm_list);
 132        mutex_unlock(&dpm_list_mtx);
 133}
 134
 135/**
 136 * device_pm_remove - Remove a device from the PM core's list of active devices.
 137 * @dev: Device to be removed from the list.
 138 */
 139void device_pm_remove(struct device *dev)
 140{
 141        pr_debug("PM: Removing info for %s:%s\n",
 142                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 143        complete_all(&dev->power.completion);
 144        mutex_lock(&dpm_list_mtx);
 145        list_del_init(&dev->power.entry);
 146        mutex_unlock(&dpm_list_mtx);
 147        device_wakeup_disable(dev);
 148        pm_runtime_remove(dev);
 149}
 150
 151/**
 152 * device_pm_move_before - Move device in the PM core's list of active devices.
 153 * @deva: Device to move in dpm_list.
 154 * @devb: Device @deva should come before.
 155 */
 156void device_pm_move_before(struct device *deva, struct device *devb)
 157{
 158        pr_debug("PM: Moving %s:%s before %s:%s\n",
 159                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 160                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 161        /* Delete deva from dpm_list and reinsert before devb. */
 162        list_move_tail(&deva->power.entry, &devb->power.entry);
 163}
 164
 165/**
 166 * device_pm_move_after - Move device in the PM core's list of active devices.
 167 * @deva: Device to move in dpm_list.
 168 * @devb: Device @deva should come after.
 169 */
 170void device_pm_move_after(struct device *deva, struct device *devb)
 171{
 172        pr_debug("PM: Moving %s:%s after %s:%s\n",
 173                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 174                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
 175        /* Delete deva from dpm_list and reinsert after devb. */
 176        list_move(&deva->power.entry, &devb->power.entry);
 177}
 178
 179/**
 180 * device_pm_move_last - Move device to end of the PM core's list of devices.
 181 * @dev: Device to move in dpm_list.
 182 */
 183void device_pm_move_last(struct device *dev)
 184{
 185        pr_debug("PM: Moving %s:%s to end of list\n",
 186                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 187        list_move_tail(&dev->power.entry, &dpm_list);
 188}
 189
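     /*
      * initcall_debug_start()/initcall_debug_report() bracket a PM callback:
      * when pm_print_times is enabled they log the callback invocation, its
      * return value and roughly how long it took.
      */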
 190static ktime_t initcall_debug_start(struct device *dev)
 191{
 192        ktime_t calltime = ktime_set(0, 0);
 193
 194        if (pm_print_times_enabled) {
 195                pr_info("calling  %s+ @ %i, parent: %s\n",
 196                        dev_name(dev), task_pid_nr(current),
 197                        dev->parent ? dev_name(dev->parent) : "none");
 198                calltime = ktime_get();
 199        }
 200
 201        return calltime;
 202}
 203
 204static void initcall_debug_report(struct device *dev, ktime_t calltime,
 205                                  int error, pm_message_t state, char *info)
 206{
 207        ktime_t rettime;
 208        s64 nsecs;
 209
 210        rettime = ktime_get();
 211        nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
 212
 213        if (pm_print_times_enabled) {
 214                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
 215                        error, (unsigned long long)nsecs >> 10);
 216        }
 217}
 218
 219/**
 220 * dpm_wait - Wait for a PM operation to complete.
 221 * @dev: Device to wait for.
 222 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 223 */
 224static void dpm_wait(struct device *dev, bool async)
 225{
 226        if (!dev)
 227                return;
 228
 229        if (async || (pm_async_enabled && dev->power.async_suspend))
 230                wait_for_completion(&dev->power.completion);
 231}
 232
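     /*
      * dpm_wait_for_children() uses dpm_wait_fn() to wait for all children of
      * @dev that are being handled asynchronously, since a parent must not be
      * suspended before its children have finished.
      */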
 233static int dpm_wait_fn(struct device *dev, void *async_ptr)
 234{
 235        dpm_wait(dev, *((bool *)async_ptr));
 236        return 0;
 237}
 238
 239static void dpm_wait_for_children(struct device *dev, bool async)
 240{
  241        device_for_each_child(dev, &async, dpm_wait_fn);
 242}
 243
 244/**
 245 * pm_op - Return the PM operation appropriate for given PM event.
 246 * @ops: PM operations to choose from.
 247 * @state: PM transition of the system being carried out.
 248 */
 249static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
 250{
 251        switch (state.event) {
 252#ifdef CONFIG_SUSPEND
 253        case PM_EVENT_SUSPEND:
 254                return ops->suspend;
 255        case PM_EVENT_RESUME:
 256                return ops->resume;
 257#endif /* CONFIG_SUSPEND */
 258#ifdef CONFIG_HIBERNATE_CALLBACKS
 259        case PM_EVENT_FREEZE:
 260        case PM_EVENT_QUIESCE:
 261                return ops->freeze;
 262        case PM_EVENT_HIBERNATE:
 263                return ops->poweroff;
 264        case PM_EVENT_THAW:
 265        case PM_EVENT_RECOVER:
 266                return ops->thaw;
 267                break;
 268        case PM_EVENT_RESTORE:
 269                return ops->restore;
 270#endif /* CONFIG_HIBERNATE_CALLBACKS */
 271        }
 272
 273        return NULL;
 274}
 275
 276/**
 277 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 278 * @ops: PM operations to choose from.
 279 * @state: PM transition of the system being carried out.
 280 *
  281 * The returned callback is invoked with runtime PM disabled for the device.
 282 */
 283static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
 284                                      pm_message_t state)
 285{
 286        switch (state.event) {
 287#ifdef CONFIG_SUSPEND
 288        case PM_EVENT_SUSPEND:
 289                return ops->suspend_late;
 290        case PM_EVENT_RESUME:
 291                return ops->resume_early;
 292#endif /* CONFIG_SUSPEND */
 293#ifdef CONFIG_HIBERNATE_CALLBACKS
 294        case PM_EVENT_FREEZE:
 295        case PM_EVENT_QUIESCE:
 296                return ops->freeze_late;
 297        case PM_EVENT_HIBERNATE:
 298                return ops->poweroff_late;
 299        case PM_EVENT_THAW:
 300        case PM_EVENT_RECOVER:
 301                return ops->thaw_early;
 302        case PM_EVENT_RESTORE:
 303                return ops->restore_early;
 304#endif /* CONFIG_HIBERNATE_CALLBACKS */
 305        }
 306
 307        return NULL;
 308}
 309
 310/**
 311 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 312 * @ops: PM operations to choose from.
 313 * @state: PM transition of the system being carried out.
 314 *
  315 * The returned callback is invoked while the device's driver is not
  316 * receiving interrupts.
 317 */
 318static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
 319{
 320        switch (state.event) {
 321#ifdef CONFIG_SUSPEND
 322        case PM_EVENT_SUSPEND:
 323                return ops->suspend_noirq;
 324        case PM_EVENT_RESUME:
 325                return ops->resume_noirq;
 326#endif /* CONFIG_SUSPEND */
 327#ifdef CONFIG_HIBERNATE_CALLBACKS
 328        case PM_EVENT_FREEZE:
 329        case PM_EVENT_QUIESCE:
 330                return ops->freeze_noirq;
 331        case PM_EVENT_HIBERNATE:
 332                return ops->poweroff_noirq;
 333        case PM_EVENT_THAW:
 334        case PM_EVENT_RECOVER:
 335                return ops->thaw_noirq;
 336        case PM_EVENT_RESTORE:
 337                return ops->restore_noirq;
 338#endif /* CONFIG_HIBERNATE_CALLBACKS */
 339        }
 340
 341        return NULL;
 342}
 343
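     /*
      * pm_dev_dbg(), pm_dev_err() and dpm_show_time() are shared logging
      * helpers: per-device debug and error messages, plus the total time
      * taken by a suspend/resume phase.
      */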
 344static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
 345{
 346        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
 347                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
 348                ", may wakeup" : "");
 349}
 350
 351static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
 352                        int error)
 353{
 354        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
 355                dev_name(dev), pm_verb(state.event), info, error);
 356}
 357
 358static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
 359{
 360        ktime_t calltime;
 361        u64 usecs64;
 362        int usecs;
 363
 364        calltime = ktime_get();
 365        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
 366        do_div(usecs64, NSEC_PER_USEC);
 367        usecs = usecs64;
 368        if (usecs == 0)
 369                usecs = 1;
 370        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
 371                info ?: "", info ? " " : "", pm_verb(state.event),
 372                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
 373}
 374
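     /*
      * Run a single PM callback, if one was found, with tracing and optional
      * timing around it, and report its result.
      */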
 375static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 376                            pm_message_t state, char *info)
 377{
 378        ktime_t calltime;
 379        int error;
 380
 381        if (!cb)
 382                return 0;
 383
 384        calltime = initcall_debug_start(dev);
 385
 386        pm_dev_dbg(dev, state, info);
 387        trace_device_pm_callback_start(dev, info, state.event);
 388        error = cb(dev);
 389        trace_device_pm_callback_end(dev, error);
 390        suspend_report_result(cb, error);
 391
 392        initcall_debug_report(dev, calltime, error, state, info);
 393
 394        return error;
 395}
 396
 397#ifdef CONFIG_DPM_WATCHDOG
 398struct dpm_watchdog {
 399        struct device           *dev;
 400        struct task_struct      *tsk;
 401        struct timer_list       timer;
 402};
 403
 404#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
 405        struct dpm_watchdog wd
 406
 407/**
 408 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 409 * @data: Watchdog object address.
 410 *
 411 * Called when a driver has timed out suspending or resuming.
 412 * There's not much we can do here to recover so panic() to
 413 * capture a crash-dump in pstore.
 414 */
 415static void dpm_watchdog_handler(unsigned long data)
 416{
 417        struct dpm_watchdog *wd = (void *)data;
 418
 419        dev_emerg(wd->dev, "**** DPM device timeout ****\n");
 420        show_stack(wd->tsk, NULL);
 421        panic("%s %s: unrecoverable failure\n",
 422                dev_driver_string(wd->dev), dev_name(wd->dev));
 423}
 424
 425/**
 426 * dpm_watchdog_set - Enable pm watchdog for given device.
 427 * @wd: Watchdog. Must be allocated on the stack.
 428 * @dev: Device to handle.
 429 */
 430static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
 431{
 432        struct timer_list *timer = &wd->timer;
 433
 434        wd->dev = dev;
 435        wd->tsk = current;
 436
 437        init_timer_on_stack(timer);
 438        /* use same timeout value for both suspend and resume */
 439        timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
 440        timer->function = dpm_watchdog_handler;
 441        timer->data = (unsigned long)wd;
 442        add_timer(timer);
 443}
 444
 445/**
 446 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 447 * @wd: Watchdog to disable.
 448 */
 449static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 450{
 451        struct timer_list *timer = &wd->timer;
 452
 453        del_timer_sync(timer);
 454        destroy_timer_on_stack(timer);
 455}
 456#else
 457#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
 458#define dpm_watchdog_set(x, y)
 459#define dpm_watchdog_clear(x)
 460#endif
 461
 462/*------------------------- Resume routines -------------------------*/
 463
 464/**
  465 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 466 * @dev: Device to handle.
 467 * @state: PM transition of the system being carried out.
 468 * @async: If true, the device is being resumed asynchronously.
 469 *
 470 * The driver of @dev will not receive interrupts while this function is being
 471 * executed.
 472 */
 473static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 474{
 475        pm_callback_t callback = NULL;
 476        char *info = NULL;
 477        int error = 0;
 478
 479        TRACE_DEVICE(dev);
 480        TRACE_RESUME(0);
 481
 482        if (dev->power.syscore || dev->power.direct_complete)
 483                goto Out;
 484
 485        if (!dev->power.is_noirq_suspended)
 486                goto Out;
 487
 488        dpm_wait(dev->parent, async);
 489
 490        if (dev->pm_domain) {
 491                info = "noirq power domain ";
 492                callback = pm_noirq_op(&dev->pm_domain->ops, state);
 493        } else if (dev->type && dev->type->pm) {
 494                info = "noirq type ";
 495                callback = pm_noirq_op(dev->type->pm, state);
 496        } else if (dev->class && dev->class->pm) {
 497                info = "noirq class ";
 498                callback = pm_noirq_op(dev->class->pm, state);
 499        } else if (dev->bus && dev->bus->pm) {
 500                info = "noirq bus ";
 501                callback = pm_noirq_op(dev->bus->pm, state);
 502        }
 503
 504        if (!callback && dev->driver && dev->driver->pm) {
 505                info = "noirq driver ";
 506                callback = pm_noirq_op(dev->driver->pm, state);
 507        }
 508
 509        error = dpm_run_callback(callback, dev, state, info);
 510        dev->power.is_noirq_suspended = false;
 511
 512 Out:
 513        complete_all(&dev->power.completion);
 514        TRACE_RESUME(error);
 515        return error;
 516}
 517
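     /*
      * A device is handled on an async thread only if it opted in via
      * power.async_suspend, async PM is globally enabled, and PM tracing is
      * not in use (the trace mechanism needs a well-defined device order).
      */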
 518static bool is_async(struct device *dev)
 519{
 520        return dev->power.async_suspend && pm_async_enabled
 521                && !pm_trace_is_enabled();
 522}
 523
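     /*
      * Async wrappers such as this one run on an async thread; the code that
      * scheduled them took a reference with get_device(), which is dropped
      * here once the device has been handled.
      */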
 524static void async_resume_noirq(void *data, async_cookie_t cookie)
 525{
 526        struct device *dev = (struct device *)data;
 527        int error;
 528
 529        error = device_resume_noirq(dev, pm_transition, true);
 530        if (error)
 531                pm_dev_err(dev, pm_transition, " async", error);
 532
 533        put_device(dev);
 534}
 535
 536/**
 537 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 538 * @state: PM transition of the system being carried out.
 539 *
 540 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 541 * enable device drivers to receive interrupts.
 542 */
 543void dpm_resume_noirq(pm_message_t state)
 544{
 545        struct device *dev;
 546        ktime_t starttime = ktime_get();
 547
 548        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
 549        mutex_lock(&dpm_list_mtx);
 550        pm_transition = state;
 551
 552        /*
  553         * Advance the async threads upfront,
 554         * in case the starting of async threads is
 555         * delayed by non-async resuming devices.
 556         */
 557        list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
 558                reinit_completion(&dev->power.completion);
 559                if (is_async(dev)) {
 560                        get_device(dev);
 561                        async_schedule(async_resume_noirq, dev);
 562                }
 563        }
 564
 565        while (!list_empty(&dpm_noirq_list)) {
 566                dev = to_device(dpm_noirq_list.next);
 567                get_device(dev);
 568                list_move_tail(&dev->power.entry, &dpm_late_early_list);
 569                mutex_unlock(&dpm_list_mtx);
 570
 571                if (!is_async(dev)) {
 572                        int error;
 573
 574                        error = device_resume_noirq(dev, state, false);
 575                        if (error) {
 576                                suspend_stats.failed_resume_noirq++;
 577                                dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
 578                                dpm_save_failed_dev(dev_name(dev));
 579                                pm_dev_err(dev, state, " noirq", error);
 580                        }
 581                }
 582
 583                mutex_lock(&dpm_list_mtx);
 584                put_device(dev);
 585        }
 586        mutex_unlock(&dpm_list_mtx);
 587        async_synchronize_full();
 588        dpm_show_time(starttime, state, "noirq");
 589        resume_device_irqs();
 590        cpuidle_resume();
 591        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
 592}
 593
 594/**
 595 * device_resume_early - Execute an "early resume" callback for given device.
 596 * @dev: Device to handle.
 597 * @state: PM transition of the system being carried out.
 598 * @async: If true, the device is being resumed asynchronously.
 599 *
 600 * Runtime PM is disabled for @dev while this function is being executed.
 601 */
 602static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 603{
 604        pm_callback_t callback = NULL;
 605        char *info = NULL;
 606        int error = 0;
 607
 608        TRACE_DEVICE(dev);
 609        TRACE_RESUME(0);
 610
 611        if (dev->power.syscore || dev->power.direct_complete)
 612                goto Out;
 613
 614        if (!dev->power.is_late_suspended)
 615                goto Out;
 616
 617        dpm_wait(dev->parent, async);
 618
 619        if (dev->pm_domain) {
 620                info = "early power domain ";
 621                callback = pm_late_early_op(&dev->pm_domain->ops, state);
 622        } else if (dev->type && dev->type->pm) {
 623                info = "early type ";
 624                callback = pm_late_early_op(dev->type->pm, state);
 625        } else if (dev->class && dev->class->pm) {
 626                info = "early class ";
 627                callback = pm_late_early_op(dev->class->pm, state);
 628        } else if (dev->bus && dev->bus->pm) {
 629                info = "early bus ";
 630                callback = pm_late_early_op(dev->bus->pm, state);
 631        }
 632
 633        if (!callback && dev->driver && dev->driver->pm) {
 634                info = "early driver ";
 635                callback = pm_late_early_op(dev->driver->pm, state);
 636        }
 637
 638        error = dpm_run_callback(callback, dev, state, info);
 639        dev->power.is_late_suspended = false;
 640
 641 Out:
 642        TRACE_RESUME(error);
 643
 644        pm_runtime_enable(dev);
 645        complete_all(&dev->power.completion);
 646        return error;
 647}
 648
 649static void async_resume_early(void *data, async_cookie_t cookie)
 650{
 651        struct device *dev = (struct device *)data;
 652        int error;
 653
 654        error = device_resume_early(dev, pm_transition, true);
 655        if (error)
 656                pm_dev_err(dev, pm_transition, " async", error);
 657
 658        put_device(dev);
 659}
 660
 661/**
 662 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 663 * @state: PM transition of the system being carried out.
 664 */
 665void dpm_resume_early(pm_message_t state)
 666{
 667        struct device *dev;
 668        ktime_t starttime = ktime_get();
 669
 670        trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
 671        mutex_lock(&dpm_list_mtx);
 672        pm_transition = state;
 673
 674        /*
  675         * Advance the async threads upfront,
 676         * in case the starting of async threads is
 677         * delayed by non-async resuming devices.
 678         */
 679        list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
 680                reinit_completion(&dev->power.completion);
 681                if (is_async(dev)) {
 682                        get_device(dev);
 683                        async_schedule(async_resume_early, dev);
 684                }
 685        }
 686
 687        while (!list_empty(&dpm_late_early_list)) {
 688                dev = to_device(dpm_late_early_list.next);
 689                get_device(dev);
 690                list_move_tail(&dev->power.entry, &dpm_suspended_list);
 691                mutex_unlock(&dpm_list_mtx);
 692
 693                if (!is_async(dev)) {
 694                        int error;
 695
 696                        error = device_resume_early(dev, state, false);
 697                        if (error) {
 698                                suspend_stats.failed_resume_early++;
 699                                dpm_save_failed_step(SUSPEND_RESUME_EARLY);
 700                                dpm_save_failed_dev(dev_name(dev));
 701                                pm_dev_err(dev, state, " early", error);
 702                        }
 703                }
 704                mutex_lock(&dpm_list_mtx);
 705                put_device(dev);
 706        }
 707        mutex_unlock(&dpm_list_mtx);
 708        async_synchronize_full();
 709        dpm_show_time(starttime, state, "early");
 710        trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
 711}
 712
 713/**
 714 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 715 * @state: PM transition of the system being carried out.
 716 */
 717void dpm_resume_start(pm_message_t state)
 718{
 719        dpm_resume_noirq(state);
 720        dpm_resume_early(state);
 721}
 722EXPORT_SYMBOL_GPL(dpm_resume_start);
 723
 724/**
 725 * device_resume - Execute "resume" callbacks for given device.
 726 * @dev: Device to handle.
 727 * @state: PM transition of the system being carried out.
 728 * @async: If true, the device is being resumed asynchronously.
 729 */
 730static int device_resume(struct device *dev, pm_message_t state, bool async)
 731{
 732        pm_callback_t callback = NULL;
 733        char *info = NULL;
 734        int error = 0;
 735        DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 736
 737        TRACE_DEVICE(dev);
 738        TRACE_RESUME(0);
 739
 740        if (dev->power.syscore)
 741                goto Complete;
 742
 743        if (dev->power.direct_complete) {
 744                /* Match the pm_runtime_disable() in __device_suspend(). */
 745                pm_runtime_enable(dev);
 746                goto Complete;
 747        }
 748
 749        dpm_wait(dev->parent, async);
 750        dpm_watchdog_set(&wd, dev);
 751        device_lock(dev);
 752
 753        /*
 754         * This is a fib.  But we'll allow new children to be added below
 755         * a resumed device, even if the device hasn't been completed yet.
 756         */
 757        dev->power.is_prepared = false;
 758
 759        if (!dev->power.is_suspended)
 760                goto Unlock;
 761
 762        if (dev->pm_domain) {
 763                info = "power domain ";
 764                callback = pm_op(&dev->pm_domain->ops, state);
 765                goto Driver;
 766        }
 767
 768        if (dev->type && dev->type->pm) {
 769                info = "type ";
 770                callback = pm_op(dev->type->pm, state);
 771                goto Driver;
 772        }
 773
 774        if (dev->class) {
 775                if (dev->class->pm) {
 776                        info = "class ";
 777                        callback = pm_op(dev->class->pm, state);
 778                        goto Driver;
 779                } else if (dev->class->resume) {
 780                        info = "legacy class ";
 781                        callback = dev->class->resume;
 782                        goto End;
 783                }
 784        }
 785
 786        if (dev->bus) {
 787                if (dev->bus->pm) {
 788                        info = "bus ";
 789                        callback = pm_op(dev->bus->pm, state);
 790                } else if (dev->bus->resume) {
 791                        info = "legacy bus ";
 792                        callback = dev->bus->resume;
 793                        goto End;
 794                }
 795        }
 796
 797 Driver:
 798        if (!callback && dev->driver && dev->driver->pm) {
 799                info = "driver ";
 800                callback = pm_op(dev->driver->pm, state);
 801        }
 802
 803 End:
 804        error = dpm_run_callback(callback, dev, state, info);
 805        dev->power.is_suspended = false;
 806
 807 Unlock:
 808        device_unlock(dev);
 809        dpm_watchdog_clear(&wd);
 810
 811 Complete:
 812        complete_all(&dev->power.completion);
 813
 814        TRACE_RESUME(error);
 815
 816        return error;
 817}
 818
 819static void async_resume(void *data, async_cookie_t cookie)
 820{
 821        struct device *dev = (struct device *)data;
 822        int error;
 823
 824        error = device_resume(dev, pm_transition, true);
 825        if (error)
 826                pm_dev_err(dev, pm_transition, " async", error);
 827        put_device(dev);
 828}
 829
 830/**
 831 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 832 * @state: PM transition of the system being carried out.
 833 *
 834 * Execute the appropriate "resume" callback for all devices whose status
 835 * indicates that they are suspended.
 836 */
 837void dpm_resume(pm_message_t state)
 838{
 839        struct device *dev;
 840        ktime_t starttime = ktime_get();
 841
 842        trace_suspend_resume(TPS("dpm_resume"), state.event, true);
 843        might_sleep();
 844
 845        mutex_lock(&dpm_list_mtx);
 846        pm_transition = state;
 847        async_error = 0;
 848
 849        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
 850                reinit_completion(&dev->power.completion);
 851                if (is_async(dev)) {
 852                        get_device(dev);
 853                        async_schedule(async_resume, dev);
 854                }
 855        }
 856
 857        while (!list_empty(&dpm_suspended_list)) {
 858                dev = to_device(dpm_suspended_list.next);
 859                get_device(dev);
 860                if (!is_async(dev)) {
 861                        int error;
 862
 863                        mutex_unlock(&dpm_list_mtx);
 864
 865                        error = device_resume(dev, state, false);
 866                        if (error) {
 867                                suspend_stats.failed_resume++;
 868                                dpm_save_failed_step(SUSPEND_RESUME);
 869                                dpm_save_failed_dev(dev_name(dev));
 870                                pm_dev_err(dev, state, "", error);
 871                        }
 872
 873                        mutex_lock(&dpm_list_mtx);
 874                }
 875                if (!list_empty(&dev->power.entry))
 876                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
 877                put_device(dev);
 878        }
 879        mutex_unlock(&dpm_list_mtx);
 880        async_synchronize_full();
 881        dpm_show_time(starttime, state, NULL);
 882
 883        cpufreq_resume();
 884        trace_suspend_resume(TPS("dpm_resume"), state.event, false);
 885}
 886
 887/**
 888 * device_complete - Complete a PM transition for given device.
 889 * @dev: Device to handle.
 890 * @state: PM transition of the system being carried out.
 891 */
 892static void device_complete(struct device *dev, pm_message_t state)
 893{
 894        void (*callback)(struct device *) = NULL;
 895        char *info = NULL;
 896
 897        if (dev->power.syscore)
 898                return;
 899
 900        device_lock(dev);
 901
 902        if (dev->pm_domain) {
 903                info = "completing power domain ";
 904                callback = dev->pm_domain->ops.complete;
 905        } else if (dev->type && dev->type->pm) {
 906                info = "completing type ";
 907                callback = dev->type->pm->complete;
 908        } else if (dev->class && dev->class->pm) {
 909                info = "completing class ";
 910                callback = dev->class->pm->complete;
 911        } else if (dev->bus && dev->bus->pm) {
 912                info = "completing bus ";
 913                callback = dev->bus->pm->complete;
 914        }
 915
 916        if (!callback && dev->driver && dev->driver->pm) {
 917                info = "completing driver ";
 918                callback = dev->driver->pm->complete;
 919        }
 920
 921        if (callback) {
 922                pm_dev_dbg(dev, state, info);
 923                trace_device_pm_callback_start(dev, info, state.event);
 924                callback(dev);
 925                trace_device_pm_callback_end(dev, 0);
 926        }
 927
 928        device_unlock(dev);
 929
 930        pm_runtime_put(dev);
 931}
 932
 933/**
 934 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 935 * @state: PM transition of the system being carried out.
 936 *
 937 * Execute the ->complete() callbacks for all devices whose PM status is not
 938 * DPM_ON (this allows new devices to be registered).
 939 */
 940void dpm_complete(pm_message_t state)
 941{
 942        struct list_head list;
 943
 944        trace_suspend_resume(TPS("dpm_complete"), state.event, true);
 945        might_sleep();
 946
 947        INIT_LIST_HEAD(&list);
 948        mutex_lock(&dpm_list_mtx);
 949        while (!list_empty(&dpm_prepared_list)) {
 950                struct device *dev = to_device(dpm_prepared_list.prev);
 951
 952                get_device(dev);
 953                dev->power.is_prepared = false;
 954                list_move(&dev->power.entry, &list);
 955                mutex_unlock(&dpm_list_mtx);
 956
 957                device_complete(dev, state);
 958
 959                mutex_lock(&dpm_list_mtx);
 960                put_device(dev);
 961        }
 962        list_splice(&list, &dpm_list);
 963        mutex_unlock(&dpm_list_mtx);
 964        trace_suspend_resume(TPS("dpm_complete"), state.event, false);
 965}
 966
 967/**
 968 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 969 * @state: PM transition of the system being carried out.
 970 *
 971 * Execute "resume" callbacks for all devices and complete the PM transition of
 972 * the system.
 973 */
 974void dpm_resume_end(pm_message_t state)
 975{
 976        dpm_resume(state);
 977        dpm_complete(state);
 978}
 979EXPORT_SYMBOL_GPL(dpm_resume_end);
 980
 981
 982/*------------------------- Suspend routines -------------------------*/
 983
 984/**
 985 * resume_event - Return a "resume" message for given "suspend" sleep state.
 986 * @sleep_state: PM message representing a sleep state.
 987 *
 988 * Return a PM message representing the resume event corresponding to given
 989 * sleep state.
 990 */
 991static pm_message_t resume_event(pm_message_t sleep_state)
 992{
 993        switch (sleep_state.event) {
 994        case PM_EVENT_SUSPEND:
 995                return PMSG_RESUME;
 996        case PM_EVENT_FREEZE:
 997        case PM_EVENT_QUIESCE:
 998                return PMSG_RECOVER;
 999        case PM_EVENT_HIBERNATE:
1000                return PMSG_RESTORE;
1001        }
1002        return PMSG_ON;
1003}
1004
1005/**
 1006 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1007 * @dev: Device to handle.
1008 * @state: PM transition of the system being carried out.
1009 * @async: If true, the device is being suspended asynchronously.
1010 *
1011 * The driver of @dev will not receive interrupts while this function is being
1012 * executed.
1013 */
1014static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1015{
1016        pm_callback_t callback = NULL;
1017        char *info = NULL;
1018        int error = 0;
1019
1020        if (async_error)
1021                goto Complete;
1022
1023        if (pm_wakeup_pending()) {
1024                async_error = -EBUSY;
1025                goto Complete;
1026        }
1027
1028        if (dev->power.syscore || dev->power.direct_complete)
1029                goto Complete;
1030
1031        dpm_wait_for_children(dev, async);
1032
1033        if (dev->pm_domain) {
1034                info = "noirq power domain ";
1035                callback = pm_noirq_op(&dev->pm_domain->ops, state);
1036        } else if (dev->type && dev->type->pm) {
1037                info = "noirq type ";
1038                callback = pm_noirq_op(dev->type->pm, state);
1039        } else if (dev->class && dev->class->pm) {
1040                info = "noirq class ";
1041                callback = pm_noirq_op(dev->class->pm, state);
1042        } else if (dev->bus && dev->bus->pm) {
1043                info = "noirq bus ";
1044                callback = pm_noirq_op(dev->bus->pm, state);
1045        }
1046
1047        if (!callback && dev->driver && dev->driver->pm) {
1048                info = "noirq driver ";
1049                callback = pm_noirq_op(dev->driver->pm, state);
1050        }
1051
1052        error = dpm_run_callback(callback, dev, state, info);
1053        if (!error)
1054                dev->power.is_noirq_suspended = true;
1055        else
1056                async_error = error;
1057
1058Complete:
1059        complete_all(&dev->power.completion);
1060        return error;
1061}
1062
1063static void async_suspend_noirq(void *data, async_cookie_t cookie)
1064{
1065        struct device *dev = (struct device *)data;
1066        int error;
1067
1068        error = __device_suspend_noirq(dev, pm_transition, true);
1069        if (error) {
1070                dpm_save_failed_dev(dev_name(dev));
1071                pm_dev_err(dev, pm_transition, " async", error);
1072        }
1073
1074        put_device(dev);
1075}
1076
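     /*
      * Dispatch the noirq suspend of @dev to an async thread if the device
      * opted in, or run it synchronously; device_suspend_late() and
      * device_suspend() below follow the same pattern for their phases.
      */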
1077static int device_suspend_noirq(struct device *dev)
1078{
1079        reinit_completion(&dev->power.completion);
1080
1081        if (pm_async_enabled && dev->power.async_suspend) {
1082                get_device(dev);
1083                async_schedule(async_suspend_noirq, dev);
1084                return 0;
1085        }
1086        return __device_suspend_noirq(dev, pm_transition, false);
1087}
1088
1089/**
1090 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1091 * @state: PM transition of the system being carried out.
1092 *
1093 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
1094 * handlers for all non-sysdev devices.
1095 */
1096int dpm_suspend_noirq(pm_message_t state)
1097{
1098        ktime_t starttime = ktime_get();
1099        int error = 0;
1100
1101        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1102        cpuidle_pause();
1103        suspend_device_irqs();
1104        mutex_lock(&dpm_list_mtx);
1105        pm_transition = state;
1106        async_error = 0;
1107
1108        while (!list_empty(&dpm_late_early_list)) {
1109                struct device *dev = to_device(dpm_late_early_list.prev);
1110
1111                get_device(dev);
1112                mutex_unlock(&dpm_list_mtx);
1113
1114                error = device_suspend_noirq(dev);
1115
1116                mutex_lock(&dpm_list_mtx);
1117                if (error) {
1118                        pm_dev_err(dev, state, " noirq", error);
1119                        dpm_save_failed_dev(dev_name(dev));
1120                        put_device(dev);
1121                        break;
1122                }
1123                if (!list_empty(&dev->power.entry))
1124                        list_move(&dev->power.entry, &dpm_noirq_list);
1125                put_device(dev);
1126
1127                if (async_error)
1128                        break;
1129        }
1130        mutex_unlock(&dpm_list_mtx);
1131        async_synchronize_full();
1132        if (!error)
1133                error = async_error;
1134
1135        if (error) {
1136                suspend_stats.failed_suspend_noirq++;
1137                dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1138                dpm_resume_noirq(resume_event(state));
1139        } else {
1140                dpm_show_time(starttime, state, "noirq");
1141        }
1142        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1143        return error;
1144}
1145
1146/**
 1147 * __device_suspend_late - Execute a "late suspend" callback for given device.
1148 * @dev: Device to handle.
1149 * @state: PM transition of the system being carried out.
1150 * @async: If true, the device is being suspended asynchronously.
1151 *
1152 * Runtime PM is disabled for @dev while this function is being executed.
1153 */
1154static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1155{
1156        pm_callback_t callback = NULL;
1157        char *info = NULL;
1158        int error = 0;
1159
1160        __pm_runtime_disable(dev, false);
1161
1162        if (async_error)
1163                goto Complete;
1164
1165        if (pm_wakeup_pending()) {
1166                async_error = -EBUSY;
1167                goto Complete;
1168        }
1169
1170        if (dev->power.syscore || dev->power.direct_complete)
1171                goto Complete;
1172
1173        dpm_wait_for_children(dev, async);
1174
1175        if (dev->pm_domain) {
1176                info = "late power domain ";
1177                callback = pm_late_early_op(&dev->pm_domain->ops, state);
1178        } else if (dev->type && dev->type->pm) {
1179                info = "late type ";
1180                callback = pm_late_early_op(dev->type->pm, state);
1181        } else if (dev->class && dev->class->pm) {
1182                info = "late class ";
1183                callback = pm_late_early_op(dev->class->pm, state);
1184        } else if (dev->bus && dev->bus->pm) {
1185                info = "late bus ";
1186                callback = pm_late_early_op(dev->bus->pm, state);
1187        }
1188
1189        if (!callback && dev->driver && dev->driver->pm) {
1190                info = "late driver ";
1191                callback = pm_late_early_op(dev->driver->pm, state);
1192        }
1193
1194        error = dpm_run_callback(callback, dev, state, info);
1195        if (!error)
1196                dev->power.is_late_suspended = true;
1197        else
1198                async_error = error;
1199
1200Complete:
1201        complete_all(&dev->power.completion);
1202        return error;
1203}
1204
1205static void async_suspend_late(void *data, async_cookie_t cookie)
1206{
1207        struct device *dev = (struct device *)data;
1208        int error;
1209
1210        error = __device_suspend_late(dev, pm_transition, true);
1211        if (error) {
1212                dpm_save_failed_dev(dev_name(dev));
1213                pm_dev_err(dev, pm_transition, " async", error);
1214        }
1215        put_device(dev);
1216}
1217
1218static int device_suspend_late(struct device *dev)
1219{
1220        reinit_completion(&dev->power.completion);
1221
1222        if (pm_async_enabled && dev->power.async_suspend) {
1223                get_device(dev);
1224                async_schedule(async_suspend_late, dev);
1225                return 0;
1226        }
1227
1228        return __device_suspend_late(dev, pm_transition, false);
1229}
1230
1231/**
1232 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1233 * @state: PM transition of the system being carried out.
1234 */
1235int dpm_suspend_late(pm_message_t state)
1236{
1237        ktime_t starttime = ktime_get();
1238        int error = 0;
1239
1240        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1241        mutex_lock(&dpm_list_mtx);
1242        pm_transition = state;
1243        async_error = 0;
1244
1245        while (!list_empty(&dpm_suspended_list)) {
1246                struct device *dev = to_device(dpm_suspended_list.prev);
1247
1248                get_device(dev);
1249                mutex_unlock(&dpm_list_mtx);
1250
1251                error = device_suspend_late(dev);
1252
1253                mutex_lock(&dpm_list_mtx);
1254                if (error) {
1255                        pm_dev_err(dev, state, " late", error);
1256                        dpm_save_failed_dev(dev_name(dev));
1257                        put_device(dev);
1258                        break;
1259                }
1260                if (!list_empty(&dev->power.entry))
1261                        list_move(&dev->power.entry, &dpm_late_early_list);
1262                put_device(dev);
1263
1264                if (async_error)
1265                        break;
1266        }
1267        mutex_unlock(&dpm_list_mtx);
1268        async_synchronize_full();
1269        if (!error)
1270                error = async_error;
1271        if (error) {
1272                suspend_stats.failed_suspend_late++;
1273                dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1274                dpm_resume_early(resume_event(state));
1275        } else {
1276                dpm_show_time(starttime, state, "late");
1277        }
1278        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1279        return error;
1280}
1281
1282/**
1283 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1284 * @state: PM transition of the system being carried out.
1285 */
1286int dpm_suspend_end(pm_message_t state)
1287{
1288        int error = dpm_suspend_late(state);
1289        if (error)
1290                return error;
1291
1292        error = dpm_suspend_noirq(state);
1293        if (error) {
1294                dpm_resume_early(resume_event(state));
1295                return error;
1296        }
1297
1298        return 0;
1299}
1300EXPORT_SYMBOL_GPL(dpm_suspend_end);
1301
1302/**
1303 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1304 * @dev: Device to suspend.
1305 * @state: PM transition of the system being carried out.
1306 * @cb: Suspend callback to execute.
1307 * @info: string description of caller.
1308 */
1309static int legacy_suspend(struct device *dev, pm_message_t state,
1310                          int (*cb)(struct device *dev, pm_message_t state),
1311                          char *info)
1312{
1313        int error;
1314        ktime_t calltime;
1315
1316        calltime = initcall_debug_start(dev);
1317
1318        trace_device_pm_callback_start(dev, info, state.event);
1319        error = cb(dev, state);
1320        trace_device_pm_callback_end(dev, error);
1321        suspend_report_result(cb, error);
1322
1323        initcall_debug_report(dev, calltime, error, state, info);
1324
1325        return error;
1326}
1327
1328/**
 1329 * __device_suspend - Execute "suspend" callbacks for given device.
1330 * @dev: Device to handle.
1331 * @state: PM transition of the system being carried out.
1332 * @async: If true, the device is being suspended asynchronously.
1333 */
1334static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1335{
1336        pm_callback_t callback = NULL;
1337        char *info = NULL;
1338        int error = 0;
1339        DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1340
1341        dpm_wait_for_children(dev, async);
1342
1343        if (async_error)
1344                goto Complete;
1345
1346        /*
1347         * If a device configured to wake up the system from sleep states
1348         * has been suspended at run time and there's a resume request pending
1349         * for it, this is equivalent to the device signaling wakeup, so the
1350         * system suspend operation should be aborted.
1351         */
1352        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1353                pm_wakeup_event(dev, 0);
1354
1355        if (pm_wakeup_pending()) {
1356                async_error = -EBUSY;
1357                goto Complete;
1358        }
1359
1360        if (dev->power.syscore)
1361                goto Complete;
1362
1363        if (dev->power.direct_complete) {
1364                if (pm_runtime_status_suspended(dev)) {
1365                        pm_runtime_disable(dev);
1366                        if (pm_runtime_suspended_if_enabled(dev))
1367                                goto Complete;
1368
1369                        pm_runtime_enable(dev);
1370                }
1371                dev->power.direct_complete = false;
1372        }
1373
1374        dpm_watchdog_set(&wd, dev);
1375        device_lock(dev);
1376
1377        if (dev->pm_domain) {
1378                info = "power domain ";
1379                callback = pm_op(&dev->pm_domain->ops, state);
1380                goto Run;
1381        }
1382
1383        if (dev->type && dev->type->pm) {
1384                info = "type ";
1385                callback = pm_op(dev->type->pm, state);
1386                goto Run;
1387        }
1388
1389        if (dev->class) {
1390                if (dev->class->pm) {
1391                        info = "class ";
1392                        callback = pm_op(dev->class->pm, state);
1393                        goto Run;
1394                } else if (dev->class->suspend) {
1395                        pm_dev_dbg(dev, state, "legacy class ");
1396                        error = legacy_suspend(dev, state, dev->class->suspend,
1397                                                "legacy class ");
1398                        goto End;
1399                }
1400        }
1401
1402        if (dev->bus) {
1403                if (dev->bus->pm) {
1404                        info = "bus ";
1405                        callback = pm_op(dev->bus->pm, state);
1406                } else if (dev->bus->suspend) {
1407                        pm_dev_dbg(dev, state, "legacy bus ");
1408                        error = legacy_suspend(dev, state, dev->bus->suspend,
1409                                                "legacy bus ");
1410                        goto End;
1411                }
1412        }
1413
1414 Run:
1415        if (!callback && dev->driver && dev->driver->pm) {
1416                info = "driver ";
1417                callback = pm_op(dev->driver->pm, state);
1418        }
1419
1420        error = dpm_run_callback(callback, dev, state, info);
1421
1422 End:
1423        if (!error) {
1424                struct device *parent = dev->parent;
1425
1426                dev->power.is_suspended = true;
1427                if (parent) {
1428                        spin_lock_irq(&parent->power.lock);
1429
1430                        dev->parent->power.direct_complete = false;
1431                        if (dev->power.wakeup_path
1432                            && !dev->parent->power.ignore_children)
1433                                dev->parent->power.wakeup_path = true;
1434
1435                        spin_unlock_irq(&parent->power.lock);
1436                }
1437        }
1438
1439        device_unlock(dev);
1440        dpm_watchdog_clear(&wd);
1441
1442 Complete:
1443        complete_all(&dev->power.completion);
1444        if (error)
1445                async_error = error;
1446
1447        return error;
1448}
1449
1450static void async_suspend(void *data, async_cookie_t cookie)
1451{
1452        struct device *dev = (struct device *)data;
1453        int error;
1454
1455        error = __device_suspend(dev, pm_transition, true);
1456        if (error) {
1457                dpm_save_failed_dev(dev_name(dev));
1458                pm_dev_err(dev, pm_transition, " async", error);
1459        }
1460
1461        put_device(dev);
1462}
1463
1464static int device_suspend(struct device *dev)
1465{
1466        reinit_completion(&dev->power.completion);
1467
1468        if (pm_async_enabled && dev->power.async_suspend) {
1469                get_device(dev);
1470                async_schedule(async_suspend, dev);
1471                return 0;
1472        }
1473
1474        return __device_suspend(dev, pm_transition, false);
1475}
1476
1477/**
1478 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1479 * @state: PM transition of the system being carried out.
1480 */
1481int dpm_suspend(pm_message_t state)
1482{
1483        ktime_t starttime = ktime_get();
1484        int error = 0;
1485
1486        trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1487        might_sleep();
1488
1489        cpufreq_suspend();
1490
1491        mutex_lock(&dpm_list_mtx);
1492        pm_transition = state;
1493        async_error = 0;
1494        while (!list_empty(&dpm_prepared_list)) {
1495                struct device *dev = to_device(dpm_prepared_list.prev);
1496
1497                get_device(dev);
1498                mutex_unlock(&dpm_list_mtx);
1499
1500                error = device_suspend(dev);
1501
1502                mutex_lock(&dpm_list_mtx);
1503                if (error) {
1504                        pm_dev_err(dev, state, "", error);
1505                        dpm_save_failed_dev(dev_name(dev));
1506                        put_device(dev);
1507                        break;
1508                }
1509                if (!list_empty(&dev->power.entry))
1510                        list_move(&dev->power.entry, &dpm_suspended_list);
1511                put_device(dev);
1512                if (async_error)
1513                        break;
1514        }
1515        mutex_unlock(&dpm_list_mtx);
1516        async_synchronize_full();
1517        if (!error)
1518                error = async_error;
1519        if (error) {
1520                suspend_stats.failed_suspend++;
1521                dpm_save_failed_step(SUSPEND_SUSPEND);
1522        } else
1523                dpm_show_time(starttime, state, NULL);
1524        trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1525        return error;
1526}
1527
1528/**
1529 * device_prepare - Prepare a device for system power transition.
1530 * @dev: Device to handle.
1531 * @state: PM transition of the system being carried out.
1532 *
1533 * Execute the ->prepare() callback(s) for given device.  No new children of the
1534 * device may be registered after this function has returned.
1535 */
1536static int device_prepare(struct device *dev, pm_message_t state)
1537{
1538        int (*callback)(struct device *) = NULL;
1539        char *info = NULL;
1540        int ret = 0;
1541
1542        if (dev->power.syscore)
1543                return 0;
1544
1545        /*
1546         * If a device's parent goes into runtime suspend at the wrong time,
1547         * it won't be possible to resume the device.  To prevent this we
1548         * block runtime suspend here, during the prepare phase, and allow
1549         * it again during the complete phase.
1550         */
1551        pm_runtime_get_noresume(dev);
1552
1553        device_lock(dev);
1554
1555        dev->power.wakeup_path = device_may_wakeup(dev);
1556
1557        if (dev->pm_domain) {
1558                info = "preparing power domain ";
1559                callback = dev->pm_domain->ops.prepare;
1560        } else if (dev->type && dev->type->pm) {
1561                info = "preparing type ";
1562                callback = dev->type->pm->prepare;
1563        } else if (dev->class && dev->class->pm) {
1564                info = "preparing class ";
1565                callback = dev->class->pm->prepare;
1566        } else if (dev->bus && dev->bus->pm) {
1567                info = "preparing bus ";
1568                callback = dev->bus->pm->prepare;
1569        }
1570
1571        if (!callback && dev->driver && dev->driver->pm) {
1572                info = "preparing driver ";
1573                callback = dev->driver->pm->prepare;
1574        }
1575
1576        if (callback) {
1577                trace_device_pm_callback_start(dev, info, state.event);
1578                ret = callback(dev);
1579                trace_device_pm_callback_end(dev, ret);
1580        }
1581
1582        device_unlock(dev);
1583
1584        if (ret < 0) {
1585                suspend_report_result(callback, ret);
1586                pm_runtime_put(dev);
1587                return ret;
1588        }
1589        /*
1590         * A positive return value from ->prepare() means "this device appears
1591         * to be runtime-suspended and its state is fine, so if it really is
1592         * runtime-suspended, you can leave it in that state provided that you
1593         * will do the same thing with all of its descendants".  This only
1594         * applies to suspend transitions, however.
1595         */
1596        spin_lock_irq(&dev->power.lock);
1597        dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
1598        spin_unlock_irq(&dev->power.lock);
1599        return 0;
1600}
1601
1602/**
1603 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1604 * @state: PM transition of the system being carried out.
1605 *
1606 * Execute the ->prepare() callback(s) for all devices.
1607 */
1608int dpm_prepare(pm_message_t state)
1609{
1610        int error = 0;
1611
1612        trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1613        might_sleep();
1614
1615        mutex_lock(&dpm_list_mtx);
1616        while (!list_empty(&dpm_list)) {
1617                struct device *dev = to_device(dpm_list.next);
1618
1619                get_device(dev);
1620                mutex_unlock(&dpm_list_mtx);
1621
1622                error = device_prepare(dev, state);
1623
1624                mutex_lock(&dpm_list_mtx);
1625                if (error) {
1626                        if (error == -EAGAIN) {
1627                                put_device(dev);
1628                                error = 0;
1629                                continue;
1630                        }
1631                        printk(KERN_INFO "PM: Device %s not prepared "
1632                                "for power transition: code %d\n",
1633                                dev_name(dev), error);
1634                        put_device(dev);
1635                        break;
1636                }
1637                dev->power.is_prepared = true;
1638                if (!list_empty(&dev->power.entry))
1639                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
1640                put_device(dev);
1641        }
1642        mutex_unlock(&dpm_list_mtx);
1643        trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1644        return error;
1645}
1646
1647/**
1648 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1649 * @state: PM transition of the system being carried out.
1650 *
1651 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1652 * callbacks for them.
1653 */
1654int dpm_suspend_start(pm_message_t state)
1655{
1656        int error;
1657
1658        error = dpm_prepare(state);
1659        if (error) {
1660                suspend_stats.failed_prepare++;
1661                dpm_save_failed_step(SUSPEND_PREPARE);
1662        } else
1663                error = dpm_suspend(state);
1664        return error;
1665}
1666EXPORT_SYMBOL_GPL(dpm_suspend_start);
1667
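     /*
      * Invoked via the suspend_report_result() macro to log which callback
      * returned a nonzero value.
      */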
1668void __suspend_report_result(const char *function, void *fn, int ret)
1669{
1670        if (ret)
1671                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1672}
1673EXPORT_SYMBOL_GPL(__suspend_report_result);
1674
1675/**
1676 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1677 * @dev: Device to wait for.
1678 * @subordinate: Device that needs to wait for @dev.
1679 */
1680int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1681{
1682        dpm_wait(dev, subordinate->power.async_suspend);
1683        return async_error;
1684}
1685EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1686
1687/**
1688 * dpm_for_each_dev - device iterator.
1689 * @data: data for the callback.
1690 * @fn: function to be called for each device.
1691 *
1692 * Iterate over devices in dpm_list, and call @fn for each device,
1693 * passing it @data.
1694 */
1695void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1696{
1697        struct device *dev;
1698
1699        if (!fn)
1700                return;
1701
1702        device_pm_lock();
1703        list_for_each_entry(dev, &dpm_list, power.entry)
1704                fn(dev, data);
1705        device_pm_unlock();
1706}
1707EXPORT_SYMBOL_GPL(dpm_for_each_dev);
1708