/*
 * pm.h - Power management interface
 *
 * Copyright (C) 2000 Andrew Henroid
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _LINUX_PM_H
#define _LINUX_PM_H

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/completion.h>

/*
 * Callbacks for platform drivers to implement.
 */
extern void (*pm_idle)(void);
extern void (*pm_power_off)(void);
extern void (*pm_power_off_prepare)(void);

/*
 * Device power management
 */

struct device;

#ifdef CONFIG_PM
extern const char power_group_name[];		/* = "power" */
#else
#define power_group_name	NULL
#endif

typedef struct pm_message {
	int event;
} pm_message_t;

/**
 * struct dev_pm_ops - device PM callbacks
 *
 * Several device power state transitions are externally visible, affecting
 * the state of pending I/O queues and (for drivers that touch hardware)
 * interrupts, wakeups, DMA, and other hardware state.  There may also be
 * internal transitions to various low-power modes which are transparent
 * to the rest of the driver stack (such as a driver that's ON gating off
 * clocks which are not in active use).
 *
 * The externally visible transitions are handled with the help of callbacks
 * included in this structure in such a way that two levels of callbacks are
 * involved.  First, the PM core executes callbacks provided by PM domains,
 * device types, classes and bus types.  They are the subsystem-level callbacks
 * supposed to execute callbacks provided by device drivers, although they may
 * choose not to do that.  If the driver callbacks are executed, they have to
 * collaborate with the subsystem-level callbacks to achieve the goals
 * appropriate for the given system transition, given transition phase and the
 * subsystem the device belongs to.
 *
 * @prepare: The principal role of this callback is to prevent new children of
 *	the device from being registered after it has returned (the driver's
 *	subsystem and generally the rest of the kernel is supposed to prevent
 *	new calls to the probe method from being made too once @prepare() has
 *	succeeded).  If @prepare() detects a situation it cannot handle (e.g.
 *	registration of a child already in progress), it may return -EAGAIN, so
 *	that the PM core can execute it once again (e.g. after a new child has
 *	been registered) to recover from the race condition.
 *	This method is executed for all kinds of suspend transitions and is
 *	followed by one of the suspend callbacks: @suspend(), @freeze(), or
 *	@poweroff().  The PM core executes subsystem-level @prepare() for all
 *	devices before starting to invoke suspend callbacks for any of them, so
 *	generally devices may be assumed to be functional or to respond to
 *	runtime resume requests while @prepare() is being executed.  However,
 *	device drivers may NOT assume anything about the availability of user
 *	space at that time and it is NOT valid to request firmware from within
 *	@prepare() (it's too late to do that).  It also is NOT valid to allocate
 *	substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
 *	[To work around these limitations, drivers may register suspend and
 *	hibernation notifiers to be executed before the freezing of tasks.]
 *
 * @complete: Undo the changes made by @prepare().  This method is executed for
 *	all kinds of resume transitions, following one of the resume callbacks:
 *	@resume(), @thaw(), @restore().  Also called if the state transition
 *	fails before the driver's suspend callback: @suspend(), @freeze() or
 *	@poweroff(), can be executed (e.g. if the suspend callback fails for one
 *	of the other devices that the PM core has unsuccessfully attempted to
 *	suspend earlier).
 *	The PM core executes subsystem-level @complete() after it has executed
 *	the appropriate resume callbacks for all devices.
 *
 * @suspend: Executed before putting the system into a sleep state in which the
 *	contents of main memory are preserved.  The exact action to perform
 *	depends on the device's subsystem (PM domain, device type, class or bus
 *	type), but generally the device must be quiescent after subsystem-level
 *	@suspend() has returned, so that it doesn't do any I/O or DMA.
 *	Subsystem-level @suspend() is executed for all devices after invoking
 *	subsystem-level @prepare() for all of them.
 *
 * @suspend_late: Continue operations started by @suspend().  For a number of
 *	devices @suspend_late() may point to the same callback routine as the
 *	runtime suspend callback.
 *
 * @resume: Executed after waking the system up from a sleep state in which the
 *	contents of main memory were preserved.  The exact action to perform
 *	depends on the device's subsystem, but generally the driver is expected
 *	to start working again, responding to hardware events and software
 *	requests (the device itself may be left in a low-power state, waiting
 *	for a runtime resume to occur).  The state of the device at the time its
 *	driver's @resume() callback is run depends on the platform and subsystem
 *	the device belongs to.  On most platforms, there are no restrictions on
 *	availability of resources like clocks during @resume().
 *	Subsystem-level @resume() is executed for all devices after invoking
 *	subsystem-level @resume_noirq() for all of them.
 *
 * @resume_early: Prepare to execute @resume().  For a number of devices
 *	@resume_early() may point to the same callback routine as the runtime
 *	resume callback.
 *
 * @freeze: Hibernation-specific, executed before creating a hibernation image.
 *	Analogous to @suspend(), but it should not enable the device to signal
 *	wakeup events or change its power state.  The majority of subsystems
 *	(with the notable exception of the PCI bus type) expect the driver-level
 *	@freeze() to save the device settings in memory to be used by @restore()
 *	during the subsequent resume from hibernation.
 *	Subsystem-level @freeze() is executed for all devices after invoking
 *	subsystem-level @prepare() for all of them.
 *
 * @freeze_late: Continue operations started by @freeze().  Analogous to
 *	@suspend_late(), but it should not enable the device to signal wakeup
 *	events or change its power state.
 *
 * @thaw: Hibernation-specific, executed after creating a hibernation image OR
 *	if the creation of an image has failed.  Also executed after a failing
 *	attempt to restore the contents of main memory from such an image.
 *	Undo the changes made by the preceding @freeze(), so the device can be
 *	operated in the same way as immediately before the call to @freeze().
 *	Subsystem-level @thaw() is executed for all devices after invoking
 *	subsystem-level @thaw_noirq() for all of them.  It also may be executed
 *	directly after @freeze() in case of a transition error.
 *
 * @thaw_early: Prepare to execute @thaw().  Undo the changes made by the
 *	preceding @freeze_late().
 *
 * @poweroff: Hibernation-specific, executed after saving a hibernation image.
 *	Analogous to @suspend(), but it need not save the device's settings in
 *	memory.
 *	Subsystem-level @poweroff() is executed for all devices after invoking
 *	subsystem-level @prepare() for all of them.
 *
 * @poweroff_late: Continue operations started by @poweroff().  Analogous to
 *	@suspend_late(), but it need not save the device's settings in memory.
 *
 * @restore: Hibernation-specific, executed after restoring the contents of main
 *	memory from a hibernation image, analogous to @resume().
 *
 * @restore_early: Prepare to execute @restore(), analogous to @resume_early().
 *
 * @suspend_noirq: Complete the actions started by @suspend().  Carry out any
 *	additional operations required for suspending the device that might be
 *	racing with its driver's interrupt handler, which is guaranteed not to
 *	run while @suspend_noirq() is being executed.
 *	It generally is expected that the device will be in a low-power state
 *	(appropriate for the target system sleep state) after subsystem-level
 *	@suspend_noirq() has returned successfully.  If the device can generate
 *	system wakeup signals and is enabled to wake up the system, it should be
 *	configured to do so at that time.  However, depending on the platform
 *	and device's subsystem, @suspend() or @suspend_late() may be allowed to
 *	put the device into the low-power state and configure it to generate
 *	wakeup signals, in which case it generally is not necessary to define
 *	@suspend_noirq().
 *
 * @resume_noirq: Prepare for the execution of @resume() by carrying out any
 *	operations required for resuming the device that might be racing with
 *	its driver's interrupt handler, which is guaranteed not to run while
 *	@resume_noirq() is being executed.
 *
 * @freeze_noirq: Complete the actions started by @freeze().  Carry out any
 *	additional operations required for freezing the device that might be
 *	racing with its driver's interrupt handler, which is guaranteed not to
 *	run while @freeze_noirq() is being executed.
 *	The power state of the device should not be changed by either @freeze(),
 *	or @freeze_late(), or @freeze_noirq() and it should not be configured to
 *	signal system wakeup by any of these callbacks.
 *
 * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
 *	operations required for thawing the device that might be racing with its
 *	driver's interrupt handler, which is guaranteed not to run while
 *	@thaw_noirq() is being executed.
 *
 * @poweroff_noirq: Complete the actions started by @poweroff().  Analogous to
 *	@suspend_noirq(), but it need not save the device's settings in memory.
 *
 * @restore_noirq: Prepare for the execution of @restore() by carrying out any
 *	operations required for thawing the device that might be racing with its
 *	driver's interrupt handler, which is guaranteed not to run while
 *	@restore_noirq() is being executed.  Analogous to @resume_noirq().
 *
 * All of the above callbacks, except for @complete(), return error codes.
 * However, the error codes returned by the resume operations, @resume(),
 * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
 * not cause the PM core to abort the resume transition during which they are
 * returned.  The error codes returned in those cases are only printed by the PM
 * core to the system logs for debugging purposes.  Still, it is recommended
 * that drivers only return error codes from their resume methods in case of an
 * unrecoverable failure (i.e. when the device being handled refuses to resume
 * and becomes unusable) to allow us to modify the PM core in the future, so
 * that it can avoid attempting to handle devices that failed to resume and
 * their children.
 *
 * It is allowed to unregister devices while the above callbacks are being
 * executed.  However, a callback routine must NOT try to unregister the device
 * it was called for, although it may unregister children of that device (for
 * example, if it detects that a child was unplugged while the system was
 * asleep).
 *
 * Refer to Documentation/power/devices.txt for more information about the role
 * of the above callbacks in the system suspend process.
 *
 * There also are callbacks related to runtime power management of devices.
 * Again, these callbacks are executed by the PM core only for subsystems
 * (PM domains, device types, classes and bus types) and the subsystem-level
 * callbacks are supposed to invoke the driver callbacks.  Moreover, the exact
 * actions to be performed by a device driver's callbacks generally depend on
 * the platform and subsystem the device belongs to.
 *
 * @runtime_suspend: Prepare the device for a condition in which it won't be
 *	able to communicate with the CPU(s) and RAM due to power management.
 *	This need not mean that the device should be put into a low-power state.
 *	For example, if the device is behind a link which is about to be turned
 *	off, the device may remain at full power.  If the device does go to low
 *	power and is capable of generating runtime wakeup events, remote wakeup
 *	(i.e., a hardware mechanism allowing the device to request a change of
 *	its power state via an interrupt) should be enabled for it.
 *
 * @runtime_resume: Put the device into the fully active state in response to a
 *	wakeup event generated by hardware or at the request of software.  If
 *	necessary, put the device into the full-power state and restore its
 *	registers, so that it is fully operational.
 *
 * @runtime_idle: Device appears to be inactive and it might be put into a
 *	low-power state if all of the necessary conditions are satisfied.  Check
 *	these conditions and handle the device as appropriate, possibly queueing
 *	a suspend request for it.  The return value is ignored by the PM core.
 *
 * Refer to Documentation/power/runtime_pm.txt for more information about the
 * role of the above callbacks in device runtime power management.
 *
 */

struct dev_pm_ops {
	int (*prepare)(struct device *dev);
	void (*complete)(struct device *dev);
	int (*suspend)(struct device *dev);
	int (*resume)(struct device *dev);
	int (*freeze)(struct device *dev);
	int (*thaw)(struct device *dev);
	int (*poweroff)(struct device *dev);
	int (*restore)(struct device *dev);
	int (*suspend_late)(struct device *dev);
	int (*resume_early)(struct device *dev);
	int (*freeze_late)(struct device *dev);
	int (*thaw_early)(struct device *dev);
	int (*poweroff_late)(struct device *dev);
	int (*restore_early)(struct device *dev);
	int (*suspend_noirq)(struct device *dev);
	int (*resume_noirq)(struct device *dev);
	int (*freeze_noirq)(struct device *dev);
	int (*thaw_noirq)(struct device *dev);
	int (*poweroff_noirq)(struct device *dev);
	int (*restore_noirq)(struct device *dev);
	int (*runtime_suspend)(struct device *dev);
	int (*runtime_resume)(struct device *dev);
	int (*runtime_idle)(struct device *dev);
};

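/*
 * Example (illustrative sketch only, not mandated by this interface): a
 * driver for a hypothetical "foo" device can provide its callbacks through
 * an instance of this structure and point its driver's "pm" field at it;
 * all foo_* identifiers below are made up for the example.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_chip *chip = dev_get_drvdata(dev);
 *
 *		foo_chip_quiesce(chip);		// stop I/O and DMA
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_chip *chip = dev_get_drvdata(dev);
 *
 *		foo_chip_reinit(chip);		// reprogram registers, restart I/O
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend = foo_suspend,
 *		.resume = foo_resume,
 *	};
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *			.pm = &foo_pm_ops,
 *		},
 *	};
 *
 * The subsystem-level callbacks then decide when (and whether) to invoke
 * foo_suspend() and foo_resume() during the transitions described above.
 */
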
#ifdef CONFIG_PM_SLEEP
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	.suspend = suspend_fn, \
	.resume = resume_fn, \
	.freeze = suspend_fn, \
	.thaw = resume_fn, \
	.poweroff = suspend_fn, \
	.restore = resume_fn,
#else
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif

#ifdef CONFIG_PM_RUNTIME
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	.runtime_suspend = suspend_fn, \
	.runtime_resume = resume_fn, \
	.runtime_idle = idle_fn,
#else
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif

/*
 * Use this if you want to use the same suspend and resume callbacks for
 * suspend to RAM and hibernation.
 */
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
const struct dev_pm_ops name = { \
	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}

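/*
 * Example (illustrative sketch, hypothetical foo_* names): for a driver that
 * only needs system sleep support,
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 * is equivalent to writing
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * so foo_suspend() and foo_resume() handle suspend to RAM as well as all of
 * the hibernation transitions (freeze/thaw, poweroff/restore).
 */
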
/*
 * Use this for defining a set of PM operations to be used in all situations
 * (system suspend, hibernation or runtime PM).
 * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should
 * be different from the corresponding runtime PM callbacks, .runtime_suspend(),
 * and .runtime_resume(), because .runtime_suspend() always works on an already
 * quiescent device, while .suspend() should assume that the device may be doing
 * something when it is called (it should ensure that the device will be
 * quiescent after it has returned).  Therefore it's better to point the "late"
 * suspend and "early" resume callback pointers, .suspend_late() and
 * .resume_early(), to the same routines as .runtime_suspend() and
 * .runtime_resume(), respectively (and analogously for hibernation).
 */
#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
const struct dev_pm_ops name = { \
	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}

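/*
 * Example (illustrative sketch, hypothetical foo_* names): a driver whose
 * device can be handled the same way for system sleep and runtime PM might
 * use
 *
 *	static UNIVERSAL_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume,
 *				    foo_idle);
 *
 * which is equivalent to
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *		SET_RUNTIME_PM_OPS(foo_suspend, foo_resume, foo_idle)
 *	};
 *
 * Per the NOTE above, foo_suspend() must then cope with a device that may
 * still be active when it is called, because the same routine also serves
 * as the system suspend callback.
 */
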
/**
 * PM_EVENT_ messages
 *
 * The following PM_EVENT_ messages are defined for the internal use of the PM
 * core, in order to provide a mechanism allowing the high level suspend and
 * hibernation code to convey the necessary information to the device PM core
 * code:
 *
 * ON		No transition.
 *
 * FREEZE	System is going to hibernate, call ->prepare() and ->freeze()
 *		for all devices.
 *
 * SUSPEND	System is going to suspend, call ->prepare() and ->suspend()
 *		for all devices.
 *
 * HIBERNATE	Hibernation image has been saved, call ->prepare() and
 *		->poweroff() for all devices.
 *
 * QUIESCE	Contents of main memory are going to be restored from a (loaded)
 *		hibernation image, call ->prepare() and ->freeze() for all
 *		devices.
 *
 * RESUME	System is resuming, call ->resume() and ->complete() for all
 *		devices.
 *
 * THAW		Hibernation image has been created, call ->thaw() and
 *		->complete() for all devices.
 *
 * RESTORE	Contents of main memory have been restored from a hibernation
 *		image, call ->restore() and ->complete() for all devices.
 *
 * RECOVER	Creation of a hibernation image or restoration of the main
 *		memory contents from a hibernation image has failed, call
 *		->thaw() and ->complete() for all devices.
 *
 * The following PM_EVENT_ messages are defined for internal use by
 * kernel subsystems.  They are never issued by the PM core.
 *
 * USER_SUSPEND		Manual selective suspend was issued by userspace.
 *
 * USER_RESUME		Manual selective resume was issued by userspace.
 *
 * REMOTE_WAKEUP	Remote-wakeup request was received from the device.
 *
 * AUTO_SUSPEND		Automatic (device idle) runtime suspend was
 *			initiated by the subsystem.
 *
 * AUTO_RESUME		Automatic (device needed) runtime resume was
 *			requested by a driver.
 */

#define PM_EVENT_INVALID	(-1)
#define PM_EVENT_ON		0x0000
#define PM_EVENT_FREEZE		0x0001
#define PM_EVENT_SUSPEND	0x0002
#define PM_EVENT_HIBERNATE	0x0004
#define PM_EVENT_QUIESCE	0x0008
#define PM_EVENT_RESUME		0x0010
#define PM_EVENT_THAW		0x0020
#define PM_EVENT_RESTORE	0x0040
#define PM_EVENT_RECOVER	0x0080
#define PM_EVENT_USER		0x0100
#define PM_EVENT_REMOTE		0x0200
#define PM_EVENT_AUTO		0x0400

#define PM_EVENT_SLEEP		(PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
#define PM_EVENT_USER_SUSPEND	(PM_EVENT_USER | PM_EVENT_SUSPEND)
#define PM_EVENT_USER_RESUME	(PM_EVENT_USER | PM_EVENT_RESUME)
#define PM_EVENT_REMOTE_RESUME	(PM_EVENT_REMOTE | PM_EVENT_RESUME)
#define PM_EVENT_AUTO_SUSPEND	(PM_EVENT_AUTO | PM_EVENT_SUSPEND)
#define PM_EVENT_AUTO_RESUME	(PM_EVENT_AUTO | PM_EVENT_RESUME)

#define PMSG_INVALID	((struct pm_message){ .event = PM_EVENT_INVALID, })
#define PMSG_ON		((struct pm_message){ .event = PM_EVENT_ON, })
#define PMSG_FREEZE	((struct pm_message){ .event = PM_EVENT_FREEZE, })
#define PMSG_QUIESCE	((struct pm_message){ .event = PM_EVENT_QUIESCE, })
#define PMSG_SUSPEND	((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_HIBERNATE	((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
#define PMSG_RESUME	((struct pm_message){ .event = PM_EVENT_RESUME, })
#define PMSG_THAW	((struct pm_message){ .event = PM_EVENT_THAW, })
#define PMSG_RESTORE	((struct pm_message){ .event = PM_EVENT_RESTORE, })
#define PMSG_RECOVER	((struct pm_message){ .event = PM_EVENT_RECOVER, })
#define PMSG_USER_SUSPEND	((struct pm_message) \
					{ .event = PM_EVENT_USER_SUSPEND, })
#define PMSG_USER_RESUME	((struct pm_message) \
					{ .event = PM_EVENT_USER_RESUME, })
#define PMSG_REMOTE_RESUME	((struct pm_message) \
					{ .event = PM_EVENT_REMOTE_RESUME, })
#define PMSG_AUTO_SUSPEND	((struct pm_message) \
					{ .event = PM_EVENT_AUTO_SUSPEND, })
#define PMSG_AUTO_RESUME	((struct pm_message) \
					{ .event = PM_EVENT_AUTO_RESUME, })

#define PMSG_IS_AUTO(msg)	(((msg).event & PM_EVENT_AUTO) != 0)

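/*
 * Example (illustrative sketch, hypothetical foo_* helpers): code that is
 * handed a pm_message_t can use the event bits above to tell the
 * transitions apart, for instance:
 *
 *	static int foo_subsys_suspend(struct device *dev, pm_message_t msg)
 *	{
 *		if (PMSG_IS_AUTO(msg))			// runtime autosuspend
 *			return foo_runtime_quiesce(dev);
 *
 *		if (msg.event & PM_EVENT_SLEEP)		// SUSPEND or HIBERNATE
 *			return foo_full_suspend(dev);
 *
 *		return foo_quiesce_only(dev);		// e.g. FREEZE, QUIESCE
 *	}
 */
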
/**
 * Device run-time power management status.
 *
 * These status labels are used internally by the PM core to indicate the
 * current status of a device with respect to the PM core operations.  They do
 * not reflect the actual power state of the device or its status as seen by
 * the driver.
 *
 * RPM_ACTIVE		Device is fully operational.  Indicates that the device
 *			bus type's ->runtime_resume() callback has completed
 *			successfully.
 *
 * RPM_SUSPENDED	Device bus type's ->runtime_suspend() callback has
 *			completed successfully.  The device is regarded as
 *			suspended.
 *
 * RPM_RESUMING		Device bus type's ->runtime_resume() callback is being
 *			executed.
 *
 * RPM_SUSPENDING	Device bus type's ->runtime_suspend() callback is being
 *			executed.
 */

enum rpm_status {
	RPM_ACTIVE = 0,
	RPM_RESUMING,
	RPM_SUSPENDED,
	RPM_SUSPENDING,
};

/**
 * Device run-time power management request types.
 *
 * RPM_REQ_NONE		Do nothing.
 *
 * RPM_REQ_IDLE		Run the device bus type's ->runtime_idle() callback
 *
 * RPM_REQ_SUSPEND	Run the device bus type's ->runtime_suspend() callback
 *
 * RPM_REQ_AUTOSUSPEND	Same as RPM_REQ_SUSPEND, but not until the device has
 *			been inactive for as long as power.autosuspend_delay
 *
 * RPM_REQ_RESUME	Run the device bus type's ->runtime_resume() callback
 */

enum rpm_request {
	RPM_REQ_NONE = 0,
	RPM_REQ_IDLE,
	RPM_REQ_SUSPEND,
	RPM_REQ_AUTOSUSPEND,
	RPM_REQ_RESUME,
};

struct wakeup_source;

struct pm_domain_data {
	struct list_head list_node;
	struct device *dev;
};

struct pm_subsys_data {
	spinlock_t lock;
	unsigned int refcount;
#ifdef CONFIG_PM_CLK
	struct list_head clock_list;
#endif
#ifdef CONFIG_PM_GENERIC_DOMAINS
	struct pm_domain_data *domain_data;
#endif
};

struct dev_pm_info {
	pm_message_t		power_state;
	unsigned int		can_wakeup:1;
	unsigned int		async_suspend:1;
	bool			is_prepared:1;	/* Owned by the PM core */
	bool			is_suspended:1;	/* Ditto */
	bool			ignore_children:1;
	spinlock_t		lock;
#ifdef CONFIG_PM_SLEEP
	struct list_head	entry;
	struct completion	completion;
	struct wakeup_source	*wakeup;
	bool			wakeup_path:1;
#else
	unsigned int		should_wakeup:1;
#endif
#ifdef CONFIG_PM_RUNTIME
	struct timer_list	suspend_timer;
	unsigned long		timer_expires;
	struct work_struct	work;
	wait_queue_head_t	wait_queue;
	atomic_t		usage_count;
	atomic_t		child_count;
	unsigned int		disable_depth:3;
	unsigned int		idle_notification:1;
	unsigned int		request_pending:1;
	unsigned int		deferred_resume:1;
	unsigned int		run_wake:1;
	unsigned int		runtime_auto:1;
	unsigned int		no_callbacks:1;
	unsigned int		irq_safe:1;
	unsigned int		use_autosuspend:1;
	unsigned int		timer_autosuspends:1;
	enum rpm_request	request;
	enum rpm_status		runtime_status;
	int			runtime_error;
	int			autosuspend_delay;
	unsigned long		last_busy;
	unsigned long		active_jiffies;
	unsigned long		suspended_jiffies;
	unsigned long		accounting_timestamp;
	ktime_t			suspend_time;
	s64			max_time_suspended_ns;
	struct dev_pm_qos_request *pq_req;
#endif
	struct pm_subsys_data	*subsys_data;	/* Owned by the subsystem. */
	struct pm_qos_constraints *constraints;
};

extern void update_pm_runtime_accounting(struct device *dev);
extern int dev_pm_get_subsys_data(struct device *dev);
extern int dev_pm_put_subsys_data(struct device *dev);

/*
 * Power domains provide callbacks that are executed during system suspend,
 * hibernation, system resume and during runtime PM transitions along with
 * subsystem-level and driver-level callbacks.
 */
struct dev_pm_domain {
	struct dev_pm_ops	ops;
};

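/*
 * Example (illustrative sketch, hypothetical foo_* names): platform code may
 * define such a set of callbacks and, in one common pattern, attach it to a
 * device before the device is registered, so that the PM core uses the
 * domain's callbacks in preference to the subsystem-level ones:
 *
 *	static struct dev_pm_domain foo_pm_domain = {
 *		.ops = {
 *			.runtime_suspend = foo_domain_runtime_suspend,
 *			.runtime_resume = foo_domain_runtime_resume,
 *			SET_SYSTEM_SLEEP_PM_OPS(foo_domain_suspend,
 *						foo_domain_resume)
 *		},
 *	};
 *
 *	dev->pm_domain = &foo_pm_domain;	// before device_register()
 */
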
/*
 * The PM_EVENT_ messages are also used by drivers implementing the legacy
 * suspend framework, based on the ->suspend() and ->resume() callbacks common
 * for suspend and hibernation transitions, according to the rules below.
 */

/* Necessary, because several drivers use PM_EVENT_PRETHAW */
#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE

/*
 * One transition is triggered by resume(), after a suspend() call; the
 * message is implicit:
 *
 * ON		Driver starts working again, responding to hardware events
 *		and software requests.  The hardware may have gone through
 *		a power-off reset, or it may have maintained state from the
 *		previous suspend() which the driver will rely on while
 *		resuming.  On most platforms, there are no restrictions on
 *		availability of resources like clocks during resume().
 *
 * Other transitions are triggered by messages sent using suspend().  All
 * these transitions quiesce the driver, so that I/O queues are inactive.
 * That commonly entails turning off IRQs and DMA; there may be rules
 * about how to quiesce that are specific to the bus or the device's type.
 * (For example, network drivers mark the link state.)  Other details may
 * differ according to the message:
 *
 * SUSPEND	Quiesce, enter a low power device state appropriate for
 *		the upcoming system state (such as PCI_D3hot), and enable
 *		wakeup events as appropriate.
 *
 * HIBERNATE	Enter a low power device state appropriate for the hibernation
 *		state (eg. ACPI S4) and enable wakeup events as appropriate.
 *
 * FREEZE	Quiesce operations so that a consistent image can be saved;
 *		but do NOT otherwise enter a low power device state, and do
 *		NOT emit system wakeup events.
 *
 * PRETHAW	Quiesce as if for FREEZE; additionally, prepare for restoring
 *		the system from a snapshot taken after an earlier FREEZE.
 *		Some drivers will need to reset their hardware state instead
 *		of preserving it, to ensure that it's never mistaken for the
 *		state which that earlier snapshot had set up.
 *
 * A minimally power-aware driver treats all messages as SUSPEND, fully
 * reinitializes its device during resume() -- whether or not it was reset
 * during the suspend/resume cycle -- and can't issue wakeup events.
 *
 * More power-aware drivers may also use low power states at runtime as
 * well as during system sleep states like PM_SUSPEND_STANDBY.  They may
 * be able to use wakeup events to exit from runtime low-power states,
 * or from system low-power states such as standby or suspend-to-RAM.
 */

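/*
 * Example (illustrative sketch, hypothetical foo_* helpers): a legacy
 * ->suspend() method following the rules above might look like
 *
 *	static int foo_suspend(struct device *dev, pm_message_t msg)
 *	{
 *		foo_quiesce(dev);		// always stop I/O, IRQs and DMA
 *
 *		switch (msg.event) {
 *		case PM_EVENT_SUSPEND:
 *		case PM_EVENT_HIBERNATE:
 *			foo_enable_wakeup(dev);
 *			foo_enter_low_power(dev);	// e.g. PCI_D3hot
 *			break;
 *		case PM_EVENT_FREEZE:
 *		case PM_EVENT_PRETHAW:
 *			// no wakeup events, no power state change; PRETHAW
 *			// may additionally require resetting stale state
 *			break;
 *		}
 *		return 0;
 *	}
 */
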
#ifdef CONFIG_PM_SLEEP
extern void device_pm_lock(void);
extern void dpm_resume_start(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
extern void dpm_resume(pm_message_t state);
extern void dpm_complete(pm_message_t state);

extern void device_pm_unlock(void);
extern int dpm_suspend_end(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
extern int dpm_prepare(pm_message_t state);

extern void __suspend_report_result(const char *function, void *fn, int ret);

#define suspend_report_result(fn, ret)					\
	do {								\
		__suspend_report_result(__func__, fn, ret);		\
	} while (0)

extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);

extern int pm_generic_prepare(struct device *dev);
extern int pm_generic_suspend_late(struct device *dev);
extern int pm_generic_suspend_noirq(struct device *dev);
extern int pm_generic_suspend(struct device *dev);
extern int pm_generic_resume_early(struct device *dev);
extern int pm_generic_resume_noirq(struct device *dev);
extern int pm_generic_resume(struct device *dev);
extern int pm_generic_freeze_noirq(struct device *dev);
extern int pm_generic_freeze_late(struct device *dev);
extern int pm_generic_freeze(struct device *dev);
extern int pm_generic_thaw_noirq(struct device *dev);
extern int pm_generic_thaw_early(struct device *dev);
extern int pm_generic_thaw(struct device *dev);
extern int pm_generic_restore_noirq(struct device *dev);
extern int pm_generic_restore_early(struct device *dev);
extern int pm_generic_restore(struct device *dev);
extern int pm_generic_poweroff_noirq(struct device *dev);
extern int pm_generic_poweroff_late(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);

#else /* !CONFIG_PM_SLEEP */

#define device_pm_lock() do {} while (0)
#define device_pm_unlock() do {} while (0)

static inline int dpm_suspend_start(pm_message_t state)
{
	return 0;
}

#define suspend_report_result(fn, ret)		do {} while (0)

static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
{
	return 0;
}

#define pm_generic_prepare	NULL
#define pm_generic_suspend	NULL
#define pm_generic_resume	NULL
#define pm_generic_freeze	NULL
#define pm_generic_thaw		NULL
#define pm_generic_restore	NULL
#define pm_generic_poweroff	NULL
#define pm_generic_complete	NULL
#endif /* !CONFIG_PM_SLEEP */

/* How to reorder dpm_list after device_move() */
enum dpm_order {
	DPM_ORDER_NONE,
	DPM_ORDER_DEV_AFTER_PARENT,
	DPM_ORDER_PARENT_BEFORE_DEV,
	DPM_ORDER_DEV_LAST,
};

#endif /* _LINUX_PM_H */