linux/drivers/base/power/main.c
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
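        /*
         * The completion is initialized as already complete, so that
         * dpm_wait() on a device that has never gone through a system
         * sleep transition does not block.
         */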
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        spin_lock_init(&dev->power.lock);
        pm_runtime_init(dev);
        INIT_LIST_HEAD(&dev->power.entry);
        dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        dev_pm_qos_constraints_init(dev);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        dev_pm_qos_constraints_destroy(dev);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (pm_print_times_enabled) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}
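
/*
 * dpm_wait_for_children() is used on the suspend path: a parent must not
 * be suspended until all of its children (which may be handled
 * asynchronously) have completed their suspend callbacks.  On the resume
 * path, device_resume() conversely waits for the parent first.
 */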

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}
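
/*
 * Illustrative example (not part of this file): a driver that needs only
 * system sleep callbacks can fill in a struct dev_pm_ops with the
 * SIMPLE_DEV_PM_OPS() helper from <linux/pm.h>:
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 * foo_suspend/foo_resume are hypothetical driver functions.  For such a
 * device, pm_op() returns foo_suspend for PM_EVENT_SUSPEND and foo_resume
 * for PM_EVENT_RESUME; the helper reuses the same two callbacks for the
 * freeze/thaw/poweroff/restore hibernation events.
 */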

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is being
 * executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the returned callback
 * is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}
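
/*
 * Note on callback selection in the device_*() functions that follow: a
 * device's PM domain, if present, takes precedence over its type, which
 * takes precedence over its class, which takes precedence over its bus.
 * The driver's own dev_pm_ops callback is used only when none of those
 * layers supplies one.
 */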

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " noirq", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_early(dev, state);
                if (error) {
                        suspend_stats.failed_resume_early++;
                        dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        bool put = false;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        dpm_wait(dev->parent, async);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        pm_runtime_enable(dev);
        put = true;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        if (put)
                pm_runtime_put_sync(dev);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}
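
/*
 * Async handling is disabled while PM trace is active: the trace mechanism
 * records the identity of the device currently being processed (so a hang
 * can be pinned on it), which is only meaningful when devices are handled
 * one at a time, in list order.
 */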

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        cpuidle_pause();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "noirq");
        return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_late++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_early(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");

        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(state);
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
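
/*
 * Illustrative call sequence (simplified from the system suspend core in
 * kernel/power/suspend.c):
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	...
 *	error = dpm_suspend_end(PMSG_SUSPEND);
 *	... the system sleep state is entered and left here ...
 *	dpm_resume_start(PMSG_RESUME);
 *	...
 *	dpm_resume_end(PMSG_RESUME);
 */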

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_runtime_put_sync(dev);
                async_error = -EBUSY;
                goto Complete;
        }

        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);

 Complete:
        complete_all(&dev->power.completion);
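
        /*
         * On success, runtime PM is left disabled (see __pm_runtime_disable()
         * below) and the usage count acquired above is retained; both are
         * undone by device_resume(), which re-enables runtime PM and drops
         * the reference with pm_runtime_put_sync().
         */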
        if (error) {
                pm_runtime_put_sync(dev);
                async_error = error;
        } else if (dev->power.is_suspended) {
                __pm_runtime_disable(dev, false);
        }

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
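        /*
         * Re-arm the completion: anything that waits for this device via
         * dpm_wait() will now block until the current suspend pass has
         * finished with it.
         */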
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int error = 0;

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback) {
                error = callback(dev);
                suspend_report_result(callback, error);
        }

        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                                "for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
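
/*
 * Callers normally use the suspend_report_result() macro from <linux/pm.h>,
 * which supplies __func__ automatically:
 *
 *	suspend_report_result(cb, error);
 */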

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);