/*
 *  pm.h - Power management interface
 *
 *  Copyright (C) 2000 Andrew Henroid
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifndef _LINUX_PM_H
#define _LINUX_PM_H

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/completion.h>

/*
 * Callbacks for platform drivers to implement.
 */
extern void (*pm_power_off)(void);
extern void (*pm_power_off_prepare)(void);

struct device; /* we have a circular dep with device.h */
#ifdef CONFIG_VT_CONSOLE_SLEEP
extern void pm_vt_switch_required(struct device *dev, bool required);
extern void pm_vt_switch_unregister(struct device *dev);
#else
static inline void pm_vt_switch_required(struct device *dev, bool required)
{
}
static inline void pm_vt_switch_unregister(struct device *dev)
{
}
#endif /* CONFIG_VT_CONSOLE_SLEEP */

/*
 * Device power management
 */

struct device;

#ifdef CONFIG_PM
extern const char power_group_name[];		/* = "power" */
#else
#define power_group_name	NULL
#endif

typedef struct pm_message {
	int event;
} pm_message_t;

/**
 * struct dev_pm_ops - device PM callbacks
 *
 * Several device power state transitions are externally visible, affecting
 * the state of pending I/O queues and (for drivers that touch hardware)
 * interrupts, wakeups, DMA, and other hardware state.  There may also be
 * internal transitions to various low-power modes which are transparent
 * to the rest of the driver stack (such as a driver that's ON gating off
 * clocks which are not in active use).
 *
 * The externally visible transitions are handled with the help of callbacks
 * included in this structure in such a way that two levels of callbacks are
 * involved.  First, the PM core executes callbacks provided by PM domains,
 * device types, classes and bus types.  They are the subsystem-level callbacks
 * supposed to execute callbacks provided by device drivers, although they may
 * choose not to do that.  If the driver callbacks are executed, they have to
 * collaborate with the subsystem-level callbacks to achieve the goals
 * appropriate for the given system transition, given transition phase and the
 * subsystem the device belongs to.
 *
 * @prepare: The principal role of this callback is to prevent new children of
 *	the device from being registered after it has returned (the driver's
 *	subsystem and generally the rest of the kernel is supposed to prevent
 *	new calls to the probe method from being made too once @prepare() has
 *	succeeded).  If @prepare() detects a situation it cannot handle (e.g.
 *	registration of a child already in progress), it may return -EAGAIN, so
 *	that the PM core can execute it once again (e.g. after a new child has
 *	been registered) to recover from the race condition.
 *	This method is executed for all kinds of suspend transitions and is
 *	followed by one of the suspend callbacks: @suspend(), @freeze(), or
 *	@poweroff().  The PM core executes subsystem-level @prepare() for all
 *	devices before starting to invoke suspend callbacks for any of them, so
 *	generally devices may be assumed to be functional or to respond to
 *	runtime resume requests while @prepare() is being executed.  However,
 *	device drivers may NOT assume anything about the availability of user
 *	space at that time and it is NOT valid to request firmware from within
 *	@prepare() (it's too late to do that).  It also is NOT valid to allocate
 *	substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
 *	[To work around these limitations, drivers may register suspend and
 *	hibernation notifiers to be executed before the freezing of tasks.]
 *
 * @complete: Undo the changes made by @prepare().  This method is executed for
 *	all kinds of resume transitions, following one of the resume callbacks:
 *	@resume(), @thaw(), @restore().  Also called if the state transition
 *	fails before the driver's suspend callback: @suspend(), @freeze() or
 *	@poweroff(), can be executed (e.g. if the suspend callback fails for one
 *	of the other devices that the PM core has unsuccessfully attempted to
 *	suspend earlier).
 *	The PM core executes subsystem-level @complete() after it has executed
 *	the appropriate resume callbacks for all devices.
 *
 * @suspend: Executed before putting the system into a sleep state in which the
 *	contents of main memory are preserved.  The exact action to perform
 *	depends on the device's subsystem (PM domain, device type, class or bus
 *	type), but generally the device must be quiescent after subsystem-level
 *	@suspend() has returned, so that it doesn't do any I/O or DMA.
 *	Subsystem-level @suspend() is executed for all devices after invoking
 *	subsystem-level @prepare() for all of them.
 *
 * @suspend_late: Continue operations started by @suspend().  For a number of
 *	devices @suspend_late() may point to the same callback routine as the
 *	runtime suspend callback.
 *
 * @resume: Executed after waking the system up from a sleep state in which the
 *	contents of main memory were preserved.  The exact action to perform
 *	depends on the device's subsystem, but generally the driver is expected
 *	to start working again, responding to hardware events and software
 *	requests (the device itself may be left in a low-power state, waiting
 *	for a runtime resume to occur).  The state of the device at the time its
 *	driver's @resume() callback is run depends on the platform and subsystem
 *	the device belongs to.  On most platforms, there are no restrictions on
 *	availability of resources like clocks during @resume().
 *	Subsystem-level @resume() is executed for all devices after invoking
 *	subsystem-level @resume_noirq() for all of them.
 *
 * @resume_early: Prepare to execute @resume().  For a number of devices
 *	@resume_early() may point to the same callback routine as the runtime
 *	resume callback.
 *
 * @freeze: Hibernation-specific, executed before creating a hibernation image.
 *	Analogous to @suspend(), but it should not enable the device to signal
 *	wakeup events or change its power state.  The majority of subsystems
 *	(with the notable exception of the PCI bus type) expect the driver-level
 *	@freeze() to save the device settings in memory to be used by @restore()
 *	during the subsequent resume from hibernation.
 *	Subsystem-level @freeze() is executed for all devices after invoking
 *	subsystem-level @prepare() for all of them.
 *
 * @freeze_late: Continue operations started by @freeze().  Analogous to
 *	@suspend_late(), but it should not enable the device to signal wakeup
 *	events or change its power state.
 *
 * @thaw: Hibernation-specific, executed after creating a hibernation image OR
 *	if the creation of an image has failed.  Also executed after a failing
 *	attempt to restore the contents of main memory from such an image.
 *	Undo the changes made by the preceding @freeze(), so the device can be
 *	operated in the same way as immediately before the call to @freeze().
 *	Subsystem-level @thaw() is executed for all devices after invoking
 *	subsystem-level @thaw_noirq() for all of them.  It also may be executed
 *	directly after @freeze() in case of a transition error.
 *
 * @thaw_early: Prepare to execute @thaw().  Undo the changes made by the
 *	preceding @freeze_late().
 *
 * @poweroff: Hibernation-specific, executed after saving a hibernation image.
 *	Analogous to @suspend(), but it need not save the device's settings in
 *	memory.
 *	Subsystem-level @poweroff() is executed for all devices after invoking
 *	subsystem-level @prepare() for all of them.
 *
 * @poweroff_late: Continue operations started by @poweroff().  Analogous to
 *	@suspend_late(), but it need not save the device's settings in memory.
 *
 * @restore: Hibernation-specific, executed after restoring the contents of main
 *	memory from a hibernation image, analogous to @resume().
 *
 * @restore_early: Prepare to execute @restore(), analogous to @resume_early().
 *
 * @suspend_noirq: Complete the actions started by @suspend().  Carry out any
 *	additional operations required for suspending the device that might be
 *	racing with its driver's interrupt handler, which is guaranteed not to
 *	run while @suspend_noirq() is being executed.
 *	It generally is expected that the device will be in a low-power state
 *	(appropriate for the target system sleep state) after subsystem-level
 *	@suspend_noirq() has returned successfully.  If the device can generate
 *	system wakeup signals and is enabled to wake up the system, it should be
 *	configured to do so at that time.  However, depending on the platform
 *	and device's subsystem, @suspend() or @suspend_late() may be allowed to
 *	put the device into the low-power state and configure it to generate
 *	wakeup signals, in which case it generally is not necessary to define
 *	@suspend_noirq().
 *
 * @resume_noirq: Prepare for the execution of @resume() by carrying out any
 *	operations required for resuming the device that might be racing with
 *	its driver's interrupt handler, which is guaranteed not to run while
 *	@resume_noirq() is being executed.
 *
 * @freeze_noirq: Complete the actions started by @freeze().  Carry out any
 *	additional operations required for freezing the device that might be
 *	racing with its driver's interrupt handler, which is guaranteed not to
 *	run while @freeze_noirq() is being executed.
 *	The power state of the device should not be changed by either @freeze(),
 *	or @freeze_late(), or @freeze_noirq() and it should not be configured to
 *	signal system wakeup by any of these callbacks.
 *
 * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
 *	operations required for thawing the device that might be racing with its
 *	driver's interrupt handler, which is guaranteed not to run while
 *	@thaw_noirq() is being executed.
 *
 * @poweroff_noirq: Complete the actions started by @poweroff().  Analogous to
 *	@suspend_noirq(), but it need not save the device's settings in memory.
 *
 * @restore_noirq: Prepare for the execution of @restore() by carrying out any
 *	operations required for thawing the device that might be racing with its
 *	driver's interrupt handler, which is guaranteed not to run while
 *	@restore_noirq() is being executed.  Analogous to @resume_noirq().
 *
 * All of the above callbacks, except for @complete(), return error codes.
 * However, the error codes returned by the resume operations, @resume(),
 * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
 * not cause the PM core to abort the resume transition during which they are
 * returned.  The error codes returned in those cases are only printed by the PM
 * core to the system logs for debugging purposes.  Still, it is recommended
 * that drivers only return error codes from their resume methods in case of an
 * unrecoverable failure (i.e. when the device being handled refuses to resume
 * and becomes unusable) to allow us to modify the PM core in the future, so
 * that it can avoid attempting to handle devices that failed to resume and
 * their children.
 *
 * It is allowed to unregister devices while the above callbacks are being
 * executed.  However, a callback routine must NOT try to unregister the device
 * it was called for, although it may unregister children of that device (for
 * example, if it detects that a child was unplugged while the system was
 * asleep).
 *
 * Refer to Documentation/power/devices.txt for more information about the role
 * of the above callbacks in the system suspend process.
 *
 * There also are callbacks related to runtime power management of devices.
 * Again, these callbacks are executed by the PM core only for subsystems
 * (PM domains, device types, classes and bus types) and the subsystem-level
 * callbacks are supposed to invoke the driver callbacks.  Moreover, the exact
 * actions to be performed by a device driver's callbacks generally depend on
 * the platform and subsystem the device belongs to.
 *
 * @runtime_suspend: Prepare the device for a condition in which it won't be
 *	able to communicate with the CPU(s) and RAM due to power management.
 *	This need not mean that the device should be put into a low-power state.
 *	For example, if the device is behind a link which is about to be turned
 *	off, the device may remain at full power.  If the device does go to low
 *	power and is capable of generating runtime wakeup events, remote wakeup
 *	(i.e., a hardware mechanism allowing the device to request a change of
 *	its power state via an interrupt) should be enabled for it.
 *
 * @runtime_resume: Put the device into the fully active state in response to a
 *	wakeup event generated by hardware or at the request of software.  If
 *	necessary, put the device into the full-power state and restore its
 *	registers, so that it is fully operational.
 *
 * @runtime_idle: Device appears to be inactive and it might be put into a
 *	low-power state if all of the necessary conditions are satisfied.
 *	Check these conditions, and return 0 if it's appropriate to let the PM
 *	core queue a suspend request for the device.
 *
 * Refer to Documentation/power/runtime_pm.txt for more information about the
 * role of the above callbacks in device runtime power management.
 *
 */

struct dev_pm_ops {
	int (*prepare)(struct device *dev);
	void (*complete)(struct device *dev);
	int (*suspend)(struct device *dev);
	int (*resume)(struct device *dev);
	int (*freeze)(struct device *dev);
	int (*thaw)(struct device *dev);
	int (*poweroff)(struct device *dev);
	int (*restore)(struct device *dev);
	int (*suspend_late)(struct device *dev);
	int (*resume_early)(struct device *dev);
	int (*freeze_late)(struct device *dev);
	int (*thaw_early)(struct device *dev);
	int (*poweroff_late)(struct device *dev);
	int (*restore_early)(struct device *dev);
	int (*suspend_noirq)(struct device *dev);
	int (*resume_noirq)(struct device *dev);
	int (*freeze_noirq)(struct device *dev);
	int (*thaw_noirq)(struct device *dev);
	int (*poweroff_noirq)(struct device *dev);
	int (*restore_noirq)(struct device *dev);
	int (*runtime_suspend)(struct device *dev);
	int (*runtime_resume)(struct device *dev);
	int (*runtime_idle)(struct device *dev);
};

#ifdef CONFIG_PM_SLEEP
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	.suspend = suspend_fn, \
	.resume = resume_fn, \
	.freeze = suspend_fn, \
	.thaw = resume_fn, \
	.poweroff = suspend_fn, \
	.restore = resume_fn,
#else
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif

#ifdef CONFIG_PM_SLEEP
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	.suspend_late = suspend_fn, \
	.resume_early = resume_fn, \
	.freeze_late = suspend_fn, \
	.thaw_early = resume_fn, \
	.poweroff_late = suspend_fn, \
	.restore_early = resume_fn,
#else
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif

#ifdef CONFIG_PM_RUNTIME
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	.runtime_suspend = suspend_fn, \
	.runtime_resume = resume_fn, \
	.runtime_idle = idle_fn,
#else
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif

#ifdef CONFIG_PM
#define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	.runtime_suspend = suspend_fn, \
	.runtime_resume = resume_fn, \
	.runtime_idle = idle_fn,
#else
#define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif

/*
 * Use this if you want to use the same suspend and resume callbacks for suspend
 * to RAM and hibernation.
 */
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
const struct dev_pm_ops name = { \
	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
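
/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this header):
 * a driver that uses the same pair of callbacks for suspend to RAM and for
 * hibernation only needs one suspend and one resume routine and can let
 * SIMPLE_DEV_PM_OPS() fill in all of the system sleep fields:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return foo_quiesce_hw(dev_get_drvdata(dev));
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return foo_reinit_hw(dev_get_drvdata(dev));
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.pm	= &foo_pm_ops,
 *		},
 *	};
 *
 * foo_quiesce_hw() and foo_reinit_hw() stand for whatever device-specific
 * work the driver has to do; they are assumed here, not defined anywhere.
 */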

/*
 * Use this for defining a set of PM operations to be used in all situations
 * (system suspend, hibernation or runtime PM).
 * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should
 * be different from the corresponding runtime PM callbacks, .runtime_suspend(),
 * and .runtime_resume(), because .runtime_suspend() always works on an already
 * quiescent device, while .suspend() should assume that the device may be doing
 * something when it is called (it should ensure that the device will be
 * quiescent after it has returned).  Therefore it's better to point the "late"
 * suspend and "early" resume callback pointers, .suspend_late() and
 * .resume_early(), to the same routines as .runtime_suspend() and
 * .runtime_resume(), respectively (and analogously for hibernation).
 */
#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
const struct dev_pm_ops name = { \
	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
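
/*
 * Illustrative sketch of the pattern recommended in the NOTE above, using a
 * hypothetical "foo" driver (none of the foo_*() routines exist anywhere):
 * dedicated .suspend()/.resume() callbacks handle system sleep, while the
 * "late" suspend and "early" resume phases reuse the runtime PM routines,
 * which may assume an already quiescent device:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_runtime_suspend,
 *					     foo_runtime_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
 *				   NULL)
 *	};
 *
 * foo_suspend() must cope with a device that may still be doing something
 * when it is called, whereas foo_runtime_suspend() always operates on a
 * device that is already idle, which is why the two are kept separate.
 */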

/**
 * PM_EVENT_ messages
 *
 * The following PM_EVENT_ messages are defined for the internal use of the PM
 * core, in order to provide a mechanism allowing the high level suspend and
 * hibernation code to convey the necessary information to the device PM core
 * code:
 *
 * ON		No transition.
 *
 * FREEZE	System is going to hibernate, call ->prepare() and ->freeze()
 *		for all devices.
 *
 * SUSPEND	System is going to suspend, call ->prepare() and ->suspend()
 *		for all devices.
 *
 * HIBERNATE	Hibernation image has been saved, call ->prepare() and
 *		->poweroff() for all devices.
 *
 * QUIESCE	Contents of main memory are going to be restored from a (loaded)
 *		hibernation image, call ->prepare() and ->freeze() for all
 *		devices.
 *
 * RESUME	System is resuming, call ->resume() and ->complete() for all
 *		devices.
 *
 * THAW		Hibernation image has been created, call ->thaw() and
 *		->complete() for all devices.
 *
 * RESTORE	Contents of main memory have been restored from a hibernation
 *		image, call ->restore() and ->complete() for all devices.
 *
 * RECOVER	Creation of a hibernation image or restoration of the main
 *		memory contents from a hibernation image has failed, call
 *		->thaw() and ->complete() for all devices.
 *
 * The following PM_EVENT_ messages are defined for internal use by
 * kernel subsystems.  They are never issued by the PM core.
 *
 * USER_SUSPEND		Manual selective suspend was issued by userspace.
 *
 * USER_RESUME		Manual selective resume was issued by userspace.
 *
 * REMOTE_WAKEUP	Remote-wakeup request was received from the device.
 *
 * AUTO_SUSPEND		Automatic (device idle) runtime suspend was
 *			initiated by the subsystem.
 *
 * AUTO_RESUME		Automatic (device needed) runtime resume was
 *			requested by a driver.
 */

#define PM_EVENT_INVALID	(-1)
#define PM_EVENT_ON		0x0000
#define PM_EVENT_FREEZE		0x0001
#define PM_EVENT_SUSPEND	0x0002
#define PM_EVENT_HIBERNATE	0x0004
#define PM_EVENT_QUIESCE	0x0008
#define PM_EVENT_RESUME		0x0010
#define PM_EVENT_THAW		0x0020
#define PM_EVENT_RESTORE	0x0040
#define PM_EVENT_RECOVER	0x0080
#define PM_EVENT_USER		0x0100
#define PM_EVENT_REMOTE		0x0200
#define PM_EVENT_AUTO		0x0400

#define PM_EVENT_SLEEP		(PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
#define PM_EVENT_USER_SUSPEND	(PM_EVENT_USER | PM_EVENT_SUSPEND)
#define PM_EVENT_USER_RESUME	(PM_EVENT_USER | PM_EVENT_RESUME)
#define PM_EVENT_REMOTE_RESUME	(PM_EVENT_REMOTE | PM_EVENT_RESUME)
#define PM_EVENT_AUTO_SUSPEND	(PM_EVENT_AUTO | PM_EVENT_SUSPEND)
#define PM_EVENT_AUTO_RESUME	(PM_EVENT_AUTO | PM_EVENT_RESUME)

#define PMSG_INVALID	((struct pm_message){ .event = PM_EVENT_INVALID, })
#define PMSG_ON		((struct pm_message){ .event = PM_EVENT_ON, })
#define PMSG_FREEZE	((struct pm_message){ .event = PM_EVENT_FREEZE, })
#define PMSG_QUIESCE	((struct pm_message){ .event = PM_EVENT_QUIESCE, })
#define PMSG_SUSPEND	((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_HIBERNATE	((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
#define PMSG_RESUME	((struct pm_message){ .event = PM_EVENT_RESUME, })
#define PMSG_THAW	((struct pm_message){ .event = PM_EVENT_THAW, })
#define PMSG_RESTORE	((struct pm_message){ .event = PM_EVENT_RESTORE, })
#define PMSG_RECOVER	((struct pm_message){ .event = PM_EVENT_RECOVER, })
#define PMSG_USER_SUSPEND	((struct pm_message) \
					{ .event = PM_EVENT_USER_SUSPEND, })
#define PMSG_USER_RESUME	((struct pm_message) \
					{ .event = PM_EVENT_USER_RESUME, })
#define PMSG_REMOTE_RESUME	((struct pm_message) \
					{ .event = PM_EVENT_REMOTE_RESUME, })
#define PMSG_AUTO_SUSPEND	((struct pm_message) \
					{ .event = PM_EVENT_AUTO_SUSPEND, })
#define PMSG_AUTO_RESUME	((struct pm_message) \
					{ .event = PM_EVENT_AUTO_RESUME, })

#define PMSG_IS_AUTO(msg)	(((msg).event & PM_EVENT_AUTO) != 0)
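
/*
 * Illustrative sketch (hypothetical subsystem code, not taken from an
 * existing one): code that still receives a pm_message_t, such as a legacy
 * bus type ->suspend() method, can use PMSG_IS_AUTO() to tell an automatic
 * runtime suspend apart from a system-wide transition:
 *
 *	static int foo_bus_suspend(struct device *dev, pm_message_t msg)
 *	{
 *		if (PMSG_IS_AUTO(msg))
 *			return foo_idle_suspend(dev);
 *		return foo_system_suspend(dev);
 *	}
 *
 * foo_idle_suspend() and foo_system_suspend() are assumed helpers and are
 * not part of this interface.
 */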

/**
 * Device run-time power management status.
 *
 * These status labels are used internally by the PM core to indicate the
 * current status of a device with respect to the PM core operations.  They do
 * not reflect the actual power state of the device or its status as seen by
 * the driver.
 *
 * RPM_ACTIVE		Device is fully operational.  Indicates that the device
 *			bus type's ->runtime_resume() callback has completed
 *			successfully.
 *
 * RPM_SUSPENDED	Device bus type's ->runtime_suspend() callback has
 *			completed successfully.  The device is regarded as
 *			suspended.
 *
 * RPM_RESUMING		Device bus type's ->runtime_resume() callback is being
 *			executed.
 *
 * RPM_SUSPENDING	Device bus type's ->runtime_suspend() callback is being
 *			executed.
 */

enum rpm_status {
	RPM_ACTIVE = 0,
	RPM_RESUMING,
	RPM_SUSPENDED,
	RPM_SUSPENDING,
};

/**
 * Device run-time power management request types.
 *
 * RPM_REQ_NONE		Do nothing.
 *
 * RPM_REQ_IDLE		Run the device bus type's ->runtime_idle() callback
 *
 * RPM_REQ_SUSPEND	Run the device bus type's ->runtime_suspend() callback
 *
 * RPM_REQ_AUTOSUSPEND	Same as RPM_REQ_SUSPEND, but not until the device has
 *			been inactive for as long as power.autosuspend_delay
 *
 * RPM_REQ_RESUME	Run the device bus type's ->runtime_resume() callback
 */

enum rpm_request {
	RPM_REQ_NONE = 0,
	RPM_REQ_IDLE,
	RPM_REQ_SUSPEND,
	RPM_REQ_AUTOSUSPEND,
	RPM_REQ_RESUME,
};

struct wakeup_source;

struct pm_domain_data {
	struct list_head list_node;
	struct device *dev;
};

struct pm_subsys_data {
	spinlock_t lock;
	unsigned int refcount;
#ifdef CONFIG_PM_CLK
	struct list_head clock_list;
#endif
#ifdef CONFIG_PM_GENERIC_DOMAINS
	struct pm_domain_data *domain_data;
#endif
};

struct dev_pm_info {
	pm_message_t power_state;
	unsigned int can_wakeup:1;
	unsigned int async_suspend:1;
	bool is_prepared:1;	/* Owned by the PM core */
	bool is_suspended:1;	/* Ditto */
	bool is_noirq_suspended:1;
	bool is_late_suspended:1;
	bool ignore_children:1;
	bool early_init:1;	/* Owned by the PM core */
	spinlock_t lock;
#ifdef CONFIG_PM_SLEEP
	struct list_head entry;
	struct completion completion;
	struct wakeup_source *wakeup;
	bool wakeup_path:1;
	bool syscore:1;
#else
	unsigned int should_wakeup:1;
#endif
#ifdef CONFIG_PM_RUNTIME
	struct timer_list suspend_timer;
	unsigned long timer_expires;
	struct work_struct work;
	wait_queue_head_t wait_queue;
	atomic_t usage_count;
	atomic_t child_count;
	unsigned int disable_depth:3;
	unsigned int idle_notification:1;
	unsigned int request_pending:1;
	unsigned int deferred_resume:1;
	unsigned int run_wake:1;
	unsigned int runtime_auto:1;
	unsigned int no_callbacks:1;
	unsigned int irq_safe:1;
	unsigned int use_autosuspend:1;
	unsigned int timer_autosuspends:1;
	unsigned int memalloc_noio:1;
	enum rpm_request request;
	enum rpm_status runtime_status;
	int runtime_error;
	int autosuspend_delay;
	unsigned long last_busy;
	unsigned long active_jiffies;
	unsigned long suspended_jiffies;
	unsigned long accounting_timestamp;
#endif
	struct pm_subsys_data *subsys_data;	/* Owned by the subsystem. */
	void (*set_latency_tolerance)(struct device *, s32);
	struct dev_pm_qos *qos;
};

extern void update_pm_runtime_accounting(struct device *dev);
extern int dev_pm_get_subsys_data(struct device *dev);
extern int dev_pm_put_subsys_data(struct device *dev);

/*
 * Power domains provide callbacks that are executed during system suspend,
 * hibernation, system resume and during runtime PM transitions along with
 * subsystem-level and driver-level callbacks.
 */
struct dev_pm_domain {
	struct dev_pm_ops ops;
};
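
/*
 * Illustrative sketch (hypothetical platform code): a PM domain provides a
 * full set of dev_pm_ops that the PM core uses in preference to the bus
 * type, class and device type callbacks for every device whose pm_domain
 * pointer (declared in struct device, see device.h) refers to it:
 *
 *	static struct dev_pm_domain foo_pm_domain = {
 *		.ops = {
 *			SET_RUNTIME_PM_OPS(foo_domain_runtime_suspend,
 *					   foo_domain_runtime_resume, NULL)
 *			SET_SYSTEM_SLEEP_PM_OPS(foo_domain_suspend,
 *						foo_domain_resume)
 *		},
 *	};
 *
 *	dev->pm_domain = &foo_pm_domain;
 *
 * The assignment is done by platform setup code before the device is probed;
 * all of the foo_domain_*() routines are assumptions made for this example.
 */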

/*
 * The PM_EVENT_ messages are also used by drivers implementing the legacy
 * suspend framework, based on the ->suspend() and ->resume() callbacks common
 * for suspend and hibernation transitions, according to the rules below.
 */

/* Necessary, because several drivers use PM_EVENT_PRETHAW */
#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE

/*
 * One transition is triggered by resume(), after a suspend() call; the
 * message is implicit:
 *
 * ON		Driver starts working again, responding to hardware events
 *		and software requests.  The hardware may have gone through
 *		a power-off reset, or it may have maintained state from the
 *		previous suspend() which the driver will rely on while
 *		resuming.  On most platforms, there are no restrictions on
 *		availability of resources like clocks during resume().
 *
 * Other transitions are triggered by messages sent using suspend().  All
 * these transitions quiesce the driver, so that I/O queues are inactive.
 * That commonly entails turning off IRQs and DMA; there may be rules
 * about how to quiesce that are specific to the bus or the device's type.
 * (For example, network drivers mark the link state.)  Other details may
 * differ according to the message:
 *
 * SUSPEND	Quiesce, enter a low power device state appropriate for
 *		the upcoming system state (such as PCI_D3hot), and enable
 *		wakeup events as appropriate.
 *
 * HIBERNATE	Enter a low power device state appropriate for the hibernation
 *		state (eg. ACPI S4) and enable wakeup events as appropriate.
 *
 * FREEZE	Quiesce operations so that a consistent image can be saved;
 *		but do NOT otherwise enter a low power device state, and do
 *		NOT emit system wakeup events.
 *
 * PRETHAW	Quiesce as if for FREEZE; additionally, prepare for restoring
 *		the system from a snapshot taken after an earlier FREEZE.
 *		Some drivers will need to reset their hardware state instead
 *		of preserving it, to ensure that it's never mistaken for the
 *		state which that earlier snapshot had set up.
 *
 * A minimally power-aware driver treats all messages as SUSPEND, fully
 * reinitializes its device during resume() -- whether or not it was reset
 * during the suspend/resume cycle -- and can't issue wakeup events.
 *
 * More power-aware drivers may also use low power states at runtime as
 * well as during system sleep states like PM_SUSPEND_STANDBY.  They may
 * be able to use wakeup events to exit from runtime low-power states,
 * or from system low-power states such as standby or suspend-to-RAM.
 */
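
/*
 * Illustrative sketch (hypothetical legacy "foo" driver): a driver using the
 * legacy ->suspend()/->resume() callbacks can dispatch on the message event
 * along the lines described above:
 *
 *	static int foo_suspend(struct device *dev, pm_message_t msg)
 *	{
 *		foo_quiesce_io(dev);
 *		switch (msg.event) {
 *		case PM_EVENT_SUSPEND:
 *		case PM_EVENT_HIBERNATE:
 *			foo_enter_low_power(dev);
 *			foo_enable_wakeup(dev);
 *			break;
 *		case PM_EVENT_FREEZE:
 *		case PM_EVENT_PRETHAW:
 *			break;
 *		}
 *		return 0;
 *	}
 *
 * The foo_*() helpers are placeholders for device-specific code; a real
 * PRETHAW handler may additionally need to reset hardware state, as noted
 * above.
 */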

#ifdef CONFIG_PM_SLEEP
extern void device_pm_lock(void);
extern void dpm_resume_start(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
extern void dpm_resume(pm_message_t state);
extern void dpm_complete(pm_message_t state);

extern void device_pm_unlock(void);
extern int dpm_suspend_end(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
extern int dpm_prepare(pm_message_t state);

extern void __suspend_report_result(const char *function, void *fn, int ret);

#define suspend_report_result(fn, ret)					\
	do {								\
		__suspend_report_result(__func__, fn, ret);		\
	} while (0)

extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
extern void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *));

extern int pm_generic_prepare(struct device *dev);
extern int pm_generic_suspend_late(struct device *dev);
extern int pm_generic_suspend_noirq(struct device *dev);
extern int pm_generic_suspend(struct device *dev);
extern int pm_generic_resume_early(struct device *dev);
extern int pm_generic_resume_noirq(struct device *dev);
extern int pm_generic_resume(struct device *dev);
extern int pm_generic_freeze_noirq(struct device *dev);
extern int pm_generic_freeze_late(struct device *dev);
extern int pm_generic_freeze(struct device *dev);
extern int pm_generic_thaw_noirq(struct device *dev);
extern int pm_generic_thaw_early(struct device *dev);
extern int pm_generic_thaw(struct device *dev);
extern int pm_generic_restore_noirq(struct device *dev);
extern int pm_generic_restore_early(struct device *dev);
extern int pm_generic_restore(struct device *dev);
extern int pm_generic_poweroff_noirq(struct device *dev);
extern int pm_generic_poweroff_late(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);

#else /* !CONFIG_PM_SLEEP */

#define device_pm_lock() do {} while (0)
#define device_pm_unlock() do {} while (0)

static inline int dpm_suspend_start(pm_message_t state)
{
	return 0;
}

#define suspend_report_result(fn, ret)		do {} while (0)

static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
{
	return 0;
}

static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
}

#define pm_generic_prepare		NULL
#define pm_generic_suspend_late		NULL
#define pm_generic_suspend_noirq	NULL
#define pm_generic_suspend		NULL
#define pm_generic_resume_early		NULL
#define pm_generic_resume_noirq		NULL
#define pm_generic_resume		NULL
#define pm_generic_freeze_noirq		NULL
#define pm_generic_freeze_late		NULL
#define pm_generic_freeze		NULL
#define pm_generic_thaw_noirq		NULL
#define pm_generic_thaw_early		NULL
#define pm_generic_thaw			NULL
#define pm_generic_restore_noirq	NULL
#define pm_generic_restore_early	NULL
#define pm_generic_restore		NULL
#define pm_generic_poweroff_noirq	NULL
#define pm_generic_poweroff_late	NULL
#define pm_generic_poweroff		NULL
#define pm_generic_complete		NULL
#endif /* !CONFIG_PM_SLEEP */

/* How to reorder dpm_list after device_move() */
enum dpm_order {
	DPM_ORDER_NONE,
	DPM_ORDER_DEV_AFTER_PARENT,
	DPM_ORDER_PARENT_BEFORE_DEV,
	DPM_ORDER_DEV_LAST,
};

#endif /* _LINUX_PM_H */