/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth-first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error, pm_message_t state, char *info)
{
        ktime_t rettime;
        s64 nsecs;

        rettime = ktime_get();
        nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

        if (pm_print_times_enabled) {
                /* nsecs >> 10 approximates ns -> us (divide by 1024) */
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)nsecs >> 10);
        }

        trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
                                    error);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if async suspend is enabled and the device's
 *	power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

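/*
 * Illustration (not part of this file): the synchronization above matters
 * only for devices that opt in to asynchronous suspend/resume.  A driver
 * does that with device_enable_async_suspend(); the probe function below
 * is a hypothetical sketch:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		device_enable_async_suspend(&pdev->dev);
 *		return 0;
 *	}
 *
 * The PM core then runs the device's callbacks from an async thread and
 * relies on dpm_wait() / dpm_wait_for_children() to keep parent/child
 * ordering intact.
 */
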
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

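/*
 * Illustration (not part of this file): the kind of dev_pm_ops object
 * that pm_op() indexes into.  The foo_* names are hypothetical:
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * With CONFIG_SUSPEND set, pm_op(&foo_pm_ops, PMSG_SUSPEND) returns
 * foo_suspend.  SET_SYSTEM_SLEEP_PM_OPS() also points the hibernation
 * callbacks (freeze/thaw/poweroff/restore) at the same pair.
 */
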
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the callback returned by this
 * function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of the device will not receive interrupts while the callback
 * returned by this function is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
        struct device           *dev;
        struct task_struct      *tsk;
        struct timer_list       timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
        struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
        struct dpm_watchdog *wd = (void *)data;

        dev_emerg(wd->dev, "**** DPM device timeout ****\n");
        show_stack(wd->tsk, NULL);
        panic("%s %s: unrecoverable failure\n",
                dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
        struct timer_list *timer = &wd->timer;

        wd->dev = dev;
        wd->tsk = current;

        init_timer_on_stack(timer);
        /* use same timeout value for both suspend and resume */
        timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
        timer->function = dpm_watchdog_handler;
        timer->data = (unsigned long)wd;
        add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
        struct timer_list *timer = &wd->timer;

        del_timer_sync(timer);
        destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

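/*
 * The watchdog above is a build-time facility.  It is switched on from
 * Kconfig, e.g. in a .config fragment (the timeout value below, in
 * seconds, is just an example):
 *
 *	CONFIG_DPM_WATCHDOG=y
 *	CONFIG_DPM_WATCHDOG_TIMEOUT=60
 *
 * When the option is disabled, the stubs above compile away to nothing.
 */
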
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " noirq", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);

        pm_runtime_enable(dev);
        return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_early(dev, state);
                if (error) {
                        suspend_stats.failed_resume_early++;
                        dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        dpm_wait(dev->parent, async);
        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        cpuidle_pause();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "noirq");
        return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        __pm_runtime_disable(dev, false);

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_late++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_early(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");

        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(resume_event(state));
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: Label identifying the callback, used in debug and trace output.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state),
                          char *info)
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend,
                                                "legacy class ");
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend,
                                                "legacy bus ");
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);
        if (error)
                async_error = error;

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

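/*
 * Note: pm_async_enabled mirrors the /sys/power/pm_async attribute, so
 * the asynchronous path above can be turned off system-wide at run time,
 * e.g. from userspace:
 *
 *	echo 0 > /sys/power/pm_async
 */
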
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int error = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device.  To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback) {
                error = callback(dev);
                suspend_report_result(callback, error);
        }

        device_unlock(dev);

        if (error)
                pm_runtime_put(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                                "for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

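/*
 * For orientation, a sketch of how a platform sleep sequence (e.g.
 * kernel/power/suspend.c) is expected to drive the entry points exported
 * by this file; error handling is omitted:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);	// prepare + suspend
 *	error = dpm_suspend_end(PMSG_SUSPEND);		// late + noirq
 *	... the platform enters the sleep state and wakes up ...
 *	dpm_resume_start(PMSG_RESUME);			// noirq + early
 *	dpm_resume_end(PMSG_RESUME);			// resume + complete
 */
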
void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

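/*
 * Illustration (hypothetical driver code): a suspend callback that must
 * not run until another device's callback in the same phase has finished.
 * The foo structure and foo_quiesce() are made up for the sketch:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->peer);
 *		if (error)
 *			return error;
 *		return foo_quiesce(foo);
 *	}
 */
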
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

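/*
 * Illustration (hypothetical): counting the devices currently known to
 * the PM core with dpm_for_each_dev():
 *
 *	static void foo_count_dev(struct device *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	dpm_for_each_dev(&count, foo_count_dev);
 */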