linux/drivers/base/power/main.c
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in dpm_list are in depth-first order: children are
 * guaranteed to be discovered after their parents, and are inserted
 * at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

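/*
 * Devices progress through the lists below as a system transition goes
 * forward: dpm_list holds every registered device, dpm_prepared_list
 * those whose ->prepare() has run, dpm_suspended_list those whose
 * ->suspend() has run, and dpm_noirq_list those whose ->suspend_noirq()
 * has run.  Resume moves devices back in the opposite direction.
 */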
LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

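/* First error reported while suspending devices; stops further suspends. */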
static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
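        /*
         * Start the completion in the "done" state so that dpm_wait()
         * does not block on a device that has never been suspended.
         */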
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        spin_lock_init(&dev->power.lock);
        pm_runtime_init(dev);
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (initcall_debug) {
                pr_info("calling  %s+ @ %i\n",
                                dev_name(dev), task_pid_nr(current));
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
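                /* >> 10 cheaply approximates the ns-to-us division by 1000. */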
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
                 const struct dev_pm_ops *ops,
                 pm_message_t state)
{
        int error = 0;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend) {
                        error = ops->suspend(dev);
                        suspend_report_result(ops->suspend, error);
                }
                break;
        case PM_EVENT_RESUME:
                if (ops->resume) {
                        error = ops->resume(dev);
                        suspend_report_result(ops->resume, error);
                }
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze) {
                        error = ops->freeze(dev);
                        suspend_report_result(ops->freeze, error);
                }
                break;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff) {
                        error = ops->poweroff(dev);
                        suspend_report_result(ops->poweroff, error);
                }
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw) {
                        error = ops->thaw(dev);
                        suspend_report_result(ops->thaw, error);
                }
                break;
        case PM_EVENT_RESTORE:
                if (ops->restore) {
                        error = ops->restore(dev);
                        suspend_report_result(ops->restore, error);
                }
                break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        default:
                error = -EINVAL;
        }

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
                        const struct dev_pm_ops *ops,
                        pm_message_t state)
{
        int error = 0;
        ktime_t calltime = ktime_set(0, 0), delta, rettime;

        if (initcall_debug) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                                dev_name(dev), task_pid_nr(current),
                                dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend_noirq) {
                        error = ops->suspend_noirq(dev);
                        suspend_report_result(ops->suspend_noirq, error);
                }
                break;
        case PM_EVENT_RESUME:
                if (ops->resume_noirq) {
                        error = ops->resume_noirq(dev);
                        suspend_report_result(ops->resume_noirq, error);
                }
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze_noirq) {
                        error = ops->freeze_noirq(dev);
                        suspend_report_result(ops->freeze_noirq, error);
                }
                break;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff_noirq) {
                        error = ops->poweroff_noirq(dev);
                        suspend_report_result(ops->poweroff_noirq, error);
                }
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw_noirq) {
                        error = ops->thaw_noirq(dev);
                        suspend_report_result(ops->thaw_noirq, error);
                }
                break;
        case PM_EVENT_RESTORE:
                if (ops->restore_noirq) {
                        error = ops->restore_noirq(dev);
                        suspend_report_result(ops->restore_noirq, error);
                }
                break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        default:
                error = -EINVAL;
        }

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                printk("initcall %s_i+ returned %d after %Ld usecs\n",
                        dev_name(dev), error,
                        (unsigned long long)ktime_to_ns(delta) >> 10);
        }

        return error;
}

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
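        /* Report at least one microsecond to avoid printing "0.000 msecs". */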
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

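        /* Callback precedence: power domain, then device type, class, bus. */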
        if (dev->pwr_domain) {
                pm_dev_dbg(dev, state, "EARLY power domain ");
                error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "EARLY type ");
                error = pm_noirq_op(dev, dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "EARLY class ");
                error = pm_noirq_op(dev, dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "EARLY ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
        }

        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

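                /*
                 * Pin the device and drop dpm_list_mtx while the callback
                 * runs; the list may be modified in the meantime.
                 */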
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error)
                        pm_dev_err(dev, state, " early", error);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
        resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        dpm_wait(dev->parent, async);
        device_lock(dev);

        /*
         * Strictly speaking, the device stays "prepared" until its
         * ->complete() callback has run, but clearing the flag early
         * lets new children be registered below a resumed device.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pwr_domain) {
                pm_dev_dbg(dev, state, "power domain ");
                error = pm_op(dev, &dev->pwr_domain->ops, state);
                goto End;
        }

        if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "type ");
                error = pm_op(dev, dev->type->pm, state);
                goto End;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                        goto End;
                } else if (dev->class->resume) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_resume(dev, dev->class->resume);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = legacy_resume(dev, dev->bus->resume);
                }
        }

 End:
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);
        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

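/* pm_trace relies on a deterministic, serialized resume order. */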
static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

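        /*
         * First kick off the resume of all async-capable devices, so that
         * they can proceed in parallel with the synchronous pass below.
         */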
        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error)
                                pm_dev_err(dev, state, "", error);

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        device_lock(dev);

        if (dev->pwr_domain) {
                pm_dev_dbg(dev, state, "completing power domain ");
                if (dev->pwr_domain->ops.complete)
                        dev->pwr_domain->ops.complete(dev);
        } else if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "completing type ");
                if (dev->type->pm->complete)
                        dev->type->pm->complete(dev);
        } else if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "completing class ");
                if (dev->class->pm->complete)
                        dev->class->pm->complete(dev);
        } else if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "completing ");
                if (dev->bus->pm->complete)
                        dev->bus->pm->complete(dev);
        }

        device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
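        /* Take devices from the tail: children are completed before parents. */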
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        int error;

        if (dev->pwr_domain) {
                pm_dev_dbg(dev, state, "LATE power domain ");
                error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
                if (error)
                        return error;
        } else if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "LATE type ");
                error = pm_noirq_op(dev, dev->type->pm, state);
                if (error)
                        return error;
        } else if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "LATE class ");
                error = pm_noirq_op(dev, dev->class->pm, state);
                if (error)
                        return error;
        } else if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "LATE ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
                if (error)
                        return error;
        }

        return 0;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
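        /* Walk dpm_suspended_list from the tail: children suspend first. */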
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        int error = 0;

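        /* A device must not suspend before all of its children have. */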
        dpm_wait_for_children(dev, async);
        device_lock(dev);

        if (async_error)
                goto Unlock;

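        /* Abort if a wakeup event has been detected in the meantime. */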
        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Unlock;
        }

        if (dev->pwr_domain) {
                pm_dev_dbg(dev, state, "power domain ");
                error = pm_op(dev, &dev->pwr_domain->ops, state);
                goto End;
        }

        if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "type ");
                error = pm_op(dev, dev->type->pm, state);
                goto End;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                        goto End;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                }
        }

 End:
        dev->power.is_suspended = !error;

 Unlock:
        device_unlock(dev);
        complete_all(&dev->power.completion);

        if (error)
                async_error = error;

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
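        /* Re-arm the completion so that waiters block until we are done. */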
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
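                /* A failed async suspend also stops the queuing of new ones. */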
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (!error)
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int error = 0;

        device_lock(dev);

        if (dev->pwr_domain) {
                pm_dev_dbg(dev, state, "preparing power domain ");
                if (dev->pwr_domain->ops.prepare)
                        error = dev->pwr_domain->ops.prepare(dev);
                suspend_report_result(dev->pwr_domain->ops.prepare, error);
                if (error)
                        goto End;
        } else if (dev->type && dev->type->pm) {
                pm_dev_dbg(dev, state, "preparing type ");
                if (dev->type->pm->prepare)
                        error = dev->type->pm->prepare(dev);
                suspend_report_result(dev->type->pm->prepare, error);
                if (error)
                        goto End;
        } else if (dev->class && dev->class->pm) {
                pm_dev_dbg(dev, state, "preparing class ");
                if (dev->class->pm->prepare)
                        error = dev->class->pm->prepare(dev);
                suspend_report_result(dev->class->pm->prepare, error);
                if (error)
                        goto End;
        } else if (dev->bus && dev->bus->pm) {
                pm_dev_dbg(dev, state, "preparing ");
                if (dev->bus->pm->prepare)
                        error = dev->bus->pm->prepare(dev);
                suspend_report_result(dev->bus->pm->prepare, error);
        }

 End:
        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

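                /*
                 * Flush pending runtime PM requests.  If the barrier flushed
                 * a runtime resume of a wakeup-capable device, count it as a
                 * wakeup event so that the suspend can be aborted.
                 */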
                pm_runtime_get_noresume(dev);
                if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                        pm_wakeup_event(dev, 0);

                pm_runtime_put_sync(dev);
                error = pm_wakeup_pending() ?
                                -EBUSY : device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
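                        /*
                         * -EAGAIN from ->prepare() presumably means the
                         * device is on its way out; skip it rather than
                         * failing the whole transition.
                         */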
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                                "for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (!error)
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);