/*
 * pm.h - Power management interface
 *
 * Copyright (C) 2000 Andrew Henroid
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _LINUX_PM_H
#define _LINUX_PM_H

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/completion.h>

/*
 * Callbacks for platform drivers to implement.
 */
extern void (*pm_power_off)(void);
extern void (*pm_power_off_prepare)(void);

struct device; /* we have a circular dep with device.h */
#ifdef CONFIG_VT_CONSOLE_SLEEP
extern void pm_vt_switch_required(struct device *dev, bool required);
extern void pm_vt_switch_unregister(struct device *dev);
#else
static inline void pm_vt_switch_required(struct device *dev, bool required)
{
}
static inline void pm_vt_switch_unregister(struct device *dev)
{
}
#endif /* CONFIG_VT_CONSOLE_SLEEP */
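
/*
 * Illustrative sketch, assuming a hypothetical "acme" board: platform or
 * board code usually installs its power-off handler by assigning
 * pm_power_off during machine setup, and firmware glue (ACPI, for example)
 * may set pm_power_off_prepare to run just before the final power-off.
 * The names and register offset below are made up for illustration.
 *
 *        static void __iomem *acme_pmu_base;
 *
 *        static void acme_power_off(void)
 *        {
 *                writel(1, acme_pmu_base + 0x40);
 *                while (1)
 *                        cpu_relax();
 *        }
 *
 *        static int __init acme_pm_init(void)
 *        {
 *                pm_power_off = acme_power_off;
 *                return 0;
 *        }
 */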

/*
 * Device power management
 */

struct device;

#ifdef CONFIG_PM
extern const char power_group_name[]; /* = "power" */
#else
#define power_group_name NULL
#endif

typedef struct pm_message {
        int event;
} pm_message_t;
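
/*
 * Illustrative sketch: a pm_message_t is just a wrapper around an event
 * code.  The PMSG_* initializers defined later in this file construct such
 * messages, and callers inspect the .event field, e.g.:
 *
 *        pm_message_t state = PMSG_SUSPEND;
 *
 *        if (state.event & PM_EVENT_SLEEP)
 *                dev_dbg(dev, "system sleep transition\n");
 *
 * Here "dev" stands for some struct device of interest; the check matches
 * both suspend-to-RAM (PM_EVENT_SUSPEND) and hibernation (PM_EVENT_HIBERNATE).
 */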

/**
 * struct dev_pm_ops - device PM callbacks
 *
 * Several device power state transitions are externally visible, affecting
 * the state of pending I/O queues and (for drivers that touch hardware)
 * interrupts, wakeups, DMA, and other hardware state. There may also be
 * internal transitions to various low-power modes which are transparent
 * to the rest of the driver stack (such as a driver that's ON gating off
 * clocks which are not in active use).
 *
 * The externally visible transitions are handled with the help of callbacks
 * included in this structure in such a way that two levels of callbacks are
 * involved. First, the PM core executes callbacks provided by PM domains,
 * device types, classes and bus types. They are the subsystem-level callbacks
 * supposed to execute callbacks provided by device drivers, although they may
 * choose not to do that. If the driver callbacks are executed, they have to
 * collaborate with the subsystem-level callbacks to achieve the goals
 * appropriate for the given system transition, given transition phase and the
 * subsystem the device belongs to.
 *
 * @prepare: The principal role of this callback is to prevent new children of
 *      the device from being registered after it has returned (the driver's
 *      subsystem and generally the rest of the kernel is supposed to prevent
 *      new calls to the probe method from being made too once @prepare() has
 *      succeeded). If @prepare() detects a situation it cannot handle (e.g.
 *      registration of a child already in progress), it may return -EAGAIN, so
 *      that the PM core can execute it once again (e.g. after a new child has
 *      been registered) to recover from the race condition.
 *      This method is executed for all kinds of suspend transitions and is
 *      followed by one of the suspend callbacks: @suspend(), @freeze(), or
 *      @poweroff(). The PM core executes subsystem-level @prepare() for all
 *      devices before starting to invoke suspend callbacks for any of them, so
 *      generally devices may be assumed to be functional or to respond to
 *      runtime resume requests while @prepare() is being executed. However,
 *      device drivers may NOT assume anything about the availability of user
 *      space at that time and it is NOT valid to request firmware from within
 *      @prepare() (it's too late to do that). It also is NOT valid to allocate
 *      substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
 *      [To work around these limitations, drivers may register suspend and
 *      hibernation notifiers to be executed before the freezing of tasks.]
 *
 * @complete: Undo the changes made by @prepare(). This method is executed for
 *      all kinds of resume transitions, following one of the resume callbacks:
 *      @resume(), @thaw(), @restore(). Also called if the state transition
 *      fails before the driver's suspend callback: @suspend(), @freeze() or
 *      @poweroff(), can be executed (e.g. if the suspend callback fails for one
 *      of the other devices that the PM core has unsuccessfully attempted to
 *      suspend earlier).
 *      The PM core executes subsystem-level @complete() after it has executed
 *      the appropriate resume callbacks for all devices.
 *
 * @suspend: Executed before putting the system into a sleep state in which the
 *      contents of main memory are preserved. The exact action to perform
 *      depends on the device's subsystem (PM domain, device type, class or bus
 *      type), but generally the device must be quiescent after subsystem-level
 *      @suspend() has returned, so that it doesn't do any I/O or DMA.
 *      Subsystem-level @suspend() is executed for all devices after invoking
 *      subsystem-level @prepare() for all of them.
 *
 * @suspend_late: Continue operations started by @suspend(). For a number of
 *      devices @suspend_late() may point to the same callback routine as the
 *      runtime suspend callback.
 *
 * @resume: Executed after waking the system up from a sleep state in which the
 *      contents of main memory were preserved. The exact action to perform
 *      depends on the device's subsystem, but generally the driver is expected
 *      to start working again, responding to hardware events and software
 *      requests (the device itself may be left in a low-power state, waiting
 *      for a runtime resume to occur). The state of the device at the time its
 *      driver's @resume() callback is run depends on the platform and subsystem
 *      the device belongs to. On most platforms, there are no restrictions on
 *      availability of resources like clocks during @resume().
 *      Subsystem-level @resume() is executed for all devices after invoking
 *      subsystem-level @resume_noirq() for all of them.
 *
 * @resume_early: Prepare to execute @resume(). For a number of devices
 *      @resume_early() may point to the same callback routine as the runtime
 *      resume callback.
 *
 * @freeze: Hibernation-specific, executed before creating a hibernation image.
 *      Analogous to @suspend(), but it should not enable the device to signal
 *      wakeup events or change its power state. The majority of subsystems
 *      (with the notable exception of the PCI bus type) expect the driver-level
 *      @freeze() to save the device settings in memory to be used by @restore()
 *      during the subsequent resume from hibernation.
 *      Subsystem-level @freeze() is executed for all devices after invoking
 *      subsystem-level @prepare() for all of them.
 *
 * @freeze_late: Continue operations started by @freeze(). Analogous to
 *      @suspend_late(), but it should not enable the device to signal wakeup
 *      events or change its power state.
 *
 * @thaw: Hibernation-specific, executed after creating a hibernation image OR
 *      if the creation of an image has failed. Also executed after a failing
 *      attempt to restore the contents of main memory from such an image.
 *      Undo the changes made by the preceding @freeze(), so the device can be
 *      operated in the same way as immediately before the call to @freeze().
 *      Subsystem-level @thaw() is executed for all devices after invoking
 *      subsystem-level @thaw_noirq() for all of them. It also may be executed
 *      directly after @freeze() in case of a transition error.
 *
 * @thaw_early: Prepare to execute @thaw(). Undo the changes made by the
 *      preceding @freeze_late().
 *
 * @poweroff: Hibernation-specific, executed after saving a hibernation image.
 *      Analogous to @suspend(), but it need not save the device's settings in
 *      memory.
 *      Subsystem-level @poweroff() is executed for all devices after invoking
 *      subsystem-level @prepare() for all of them.
 *
 * @poweroff_late: Continue operations started by @poweroff(). Analogous to
 *      @suspend_late(), but it need not save the device's settings in memory.
 *
 * @restore: Hibernation-specific, executed after restoring the contents of main
 *      memory from a hibernation image, analogous to @resume().
 *
 * @restore_early: Prepare to execute @restore(), analogous to @resume_early().
 *
 * @suspend_noirq: Complete the actions started by @suspend(). Carry out any
 *      additional operations required for suspending the device that might be
 *      racing with its driver's interrupt handler, which is guaranteed not to
 *      run while @suspend_noirq() is being executed.
 *      It generally is expected that the device will be in a low-power state
 *      (appropriate for the target system sleep state) after subsystem-level
 *      @suspend_noirq() has returned successfully. If the device can generate
 *      system wakeup signals and is enabled to wake up the system, it should be
 *      configured to do so at that time. However, depending on the platform
 *      and device's subsystem, @suspend() or @suspend_late() may be allowed to
 *      put the device into the low-power state and configure it to generate
 *      wakeup signals, in which case it generally is not necessary to define
 *      @suspend_noirq().
 *
 * @resume_noirq: Prepare for the execution of @resume() by carrying out any
 *      operations required for resuming the device that might be racing with
 *      its driver's interrupt handler, which is guaranteed not to run while
 *      @resume_noirq() is being executed.
 *
 * @freeze_noirq: Complete the actions started by @freeze(). Carry out any
 *      additional operations required for freezing the device that might be
 *      racing with its driver's interrupt handler, which is guaranteed not to
 *      run while @freeze_noirq() is being executed.
 *      The power state of the device should not be changed by either @freeze(),
 *      or @freeze_late(), or @freeze_noirq() and it should not be configured to
 *      signal system wakeup by any of these callbacks.
 *
 * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
 *      operations required for thawing the device that might be racing with its
 *      driver's interrupt handler, which is guaranteed not to run while
 *      @thaw_noirq() is being executed.
 *
 * @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to
 *      @suspend_noirq(), but it need not save the device's settings in memory.
 *
 * @restore_noirq: Prepare for the execution of @restore() by carrying out any
 *      operations required for thawing the device that might be racing with its
 *      driver's interrupt handler, which is guaranteed not to run while
 *      @restore_noirq() is being executed. Analogous to @resume_noirq().
 *
 * All of the above callbacks, except for @complete(), return error codes.
 * However, the error codes returned by the resume operations, @resume(),
 * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
 * not cause the PM core to abort the resume transition during which they are
 * returned. The error codes returned in those cases are only printed by the PM
 * core to the system logs for debugging purposes. Still, it is recommended
 * that drivers only return error codes from their resume methods in case of an
 * unrecoverable failure (i.e. when the device being handled refuses to resume
 * and becomes unusable) to allow us to modify the PM core in the future, so
 * that it can avoid attempting to handle devices that failed to resume and
 * their children.
 *
 * It is allowed to unregister devices while the above callbacks are being
 * executed. However, a callback routine must NOT try to unregister the device
 * it was called for, although it may unregister children of that device (for
 * example, if it detects that a child was unplugged while the system was
 * asleep).
 *
 * Refer to Documentation/power/devices.txt for more information about the role
 * of the above callbacks in the system suspend process.
 *
 * There also are callbacks related to runtime power management of devices.
 * Again, these callbacks are executed by the PM core only for subsystems
 * (PM domains, device types, classes and bus types) and the subsystem-level
 * callbacks are supposed to invoke the driver callbacks. Moreover, the exact
 * actions to be performed by a device driver's callbacks generally depend on
 * the platform and subsystem the device belongs to.
 *
 * @runtime_suspend: Prepare the device for a condition in which it won't be
 *      able to communicate with the CPU(s) and RAM due to power management.
 *      This need not mean that the device should be put into a low-power state.
 *      For example, if the device is behind a link which is about to be turned
 *      off, the device may remain at full power. If the device does go to low
 *      power and is capable of generating runtime wakeup events, remote wakeup
 *      (i.e., a hardware mechanism allowing the device to request a change of
 *      its power state via an interrupt) should be enabled for it.
 *
 * @runtime_resume: Put the device into the fully active state in response to a
 *      wakeup event generated by hardware or at the request of software. If
 *      necessary, put the device into the full-power state and restore its
 *      registers, so that it is fully operational.
 *
 * @runtime_idle: Device appears to be inactive and it might be put into a
 *      low-power state if all of the necessary conditions are satisfied. Check
 *      these conditions and handle the device as appropriate, possibly queueing
 *      a suspend request for it. The return value is ignored by the PM core.
 *
 * Refer to Documentation/power/runtime_pm.txt for more information about the
 * role of the above callbacks in device runtime power management.
 *
 */
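
/*
 * Illustrative sketch, assuming a hypothetical "foo" driver whose runtime
 * PM callbacks simply gate its functional clock; a real driver's actions
 * depend on its subsystem and platform.  foo_priv and its clk member are
 * assumptions for illustration; pm_runtime_autosuspend() comes from
 * linux/pm_runtime.h.
 *
 *        static int foo_runtime_suspend(struct device *dev)
 *        {
 *                struct foo_priv *priv = dev_get_drvdata(dev);
 *
 *                clk_disable_unprepare(priv->clk);
 *                return 0;
 *        }
 *
 *        static int foo_runtime_resume(struct device *dev)
 *        {
 *                struct foo_priv *priv = dev_get_drvdata(dev);
 *
 *                return clk_prepare_enable(priv->clk);
 *        }
 *
 *        static int foo_runtime_idle(struct device *dev)
 *        {
 *                pm_runtime_autosuspend(dev);
 *                return 0;
 *        }
 */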

struct dev_pm_ops {
        int (*prepare)(struct device *dev);
        void (*complete)(struct device *dev);
        int (*suspend)(struct device *dev);
        int (*resume)(struct device *dev);
        int (*freeze)(struct device *dev);
        int (*thaw)(struct device *dev);
        int (*poweroff)(struct device *dev);
        int (*restore)(struct device *dev);
        int (*suspend_late)(struct device *dev);
        int (*resume_early)(struct device *dev);
        int (*freeze_late)(struct device *dev);
        int (*thaw_early)(struct device *dev);
        int (*poweroff_late)(struct device *dev);
        int (*restore_early)(struct device *dev);
        int (*suspend_noirq)(struct device *dev);
        int (*resume_noirq)(struct device *dev);
        int (*freeze_noirq)(struct device *dev);
        int (*thaw_noirq)(struct device *dev);
        int (*poweroff_noirq)(struct device *dev);
        int (*restore_noirq)(struct device *dev);
        int (*runtime_suspend)(struct device *dev);
        int (*runtime_resume)(struct device *dev);
        int (*runtime_idle)(struct device *dev);
};

#ifdef CONFIG_PM_SLEEP
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
        .suspend = suspend_fn, \
        .resume = resume_fn, \
        .freeze = suspend_fn, \
        .thaw = resume_fn, \
        .poweroff = suspend_fn, \
        .restore = resume_fn,
#else
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif

#ifdef CONFIG_PM_RUNTIME
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
        .runtime_suspend = suspend_fn, \
        .runtime_resume = resume_fn, \
        .runtime_idle = idle_fn,
#else
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif

/*
 * Use this if you want to use the same suspend and resume callbacks for suspend
 * to RAM and hibernation.
 */
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
const struct dev_pm_ops name = { \
        SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
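
/*
 * Illustrative sketch, assuming hypothetical foo_suspend()/foo_resume()
 * callbacks: a driver that only needs system sleep support can define its
 * callbacks once and let SIMPLE_DEV_PM_OPS reuse them for both suspend to
 * RAM and hibernation.
 *
 *        static int foo_suspend(struct device *dev)
 *        {
 *                return 0;
 *        }
 *
 *        static int foo_resume(struct device *dev)
 *        {
 *                return 0;
 *        }
 *
 *        static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 * The resulting foo_pm_ops is then referenced from the driver structure,
 * for instance as .driver = { .pm = &foo_pm_ops } in a platform_driver.
 */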

/*
 * Use this for defining a set of PM operations to be used in all situations
 * (system suspend, hibernation or runtime PM).
 * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should
 * be different from the corresponding runtime PM callbacks, .runtime_suspend(),
 * and .runtime_resume(), because .runtime_suspend() always works on an already
 * quiescent device, while .suspend() should assume that the device may be doing
 * something when it is called (it should ensure that the device will be
 * quiescent after it has returned). Therefore it's better to point the "late"
 * suspend and "early" resume callback pointers, .suspend_late() and
 * .resume_early(), to the same routines as .runtime_suspend() and
 * .runtime_resume(), respectively (and analogously for hibernation).
 */
#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
const struct dev_pm_ops name = { \
        SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
        SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
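
/*
 * Illustrative sketch, following the NOTE above and assuming hypothetical
 * foo_* callbacks: a driver combining system sleep and runtime PM may keep
 * distinct .suspend()/.resume() callbacks while pointing the "late"/"early"
 * pointers at its runtime routines, instead of reusing one callback for
 * everything via UNIVERSAL_DEV_PM_OPS.
 *
 *        static const struct dev_pm_ops foo_pm_ops = {
 *                SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *                SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *                .suspend_late = foo_runtime_suspend,
 *                .resume_early = foo_runtime_resume,
 *        };
 */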

/**
 * PM_EVENT_ messages
 *
 * The following PM_EVENT_ messages are defined for the internal use of the PM
 * core, in order to provide a mechanism allowing the high level suspend and
 * hibernation code to convey the necessary information to the device PM core
 * code:
 *
 * ON           No transition.
 *
 * FREEZE       System is going to hibernate, call ->prepare() and ->freeze()
 *              for all devices.
 *
 * SUSPEND      System is going to suspend, call ->prepare() and ->suspend()
 *              for all devices.
 *
 * HIBERNATE    Hibernation image has been saved, call ->prepare() and
 *              ->poweroff() for all devices.
 *
 * QUIESCE      Contents of main memory are going to be restored from a (loaded)
 *              hibernation image, call ->prepare() and ->freeze() for all
 *              devices.
 *
 * RESUME       System is resuming, call ->resume() and ->complete() for all
 *              devices.
 *
 * THAW         Hibernation image has been created, call ->thaw() and
 *              ->complete() for all devices.
 *
 * RESTORE      Contents of main memory have been restored from a hibernation
 *              image, call ->restore() and ->complete() for all devices.
 *
 * RECOVER      Creation of a hibernation image or restoration of the main
 *              memory contents from a hibernation image has failed, call
 *              ->thaw() and ->complete() for all devices.
 *
 * The following PM_EVENT_ messages are defined for internal use by
 * kernel subsystems. They are never issued by the PM core.
 *
 * USER_SUSPEND         Manual selective suspend was issued by userspace.
 *
 * USER_RESUME          Manual selective resume was issued by userspace.
 *
 * REMOTE_WAKEUP        Remote-wakeup request was received from the device.
 *
 * AUTO_SUSPEND         Automatic (device idle) runtime suspend was
 *                      initiated by the subsystem.
 *
 * AUTO_RESUME          Automatic (device needed) runtime resume was
 *                      requested by a driver.
 */

#define PM_EVENT_INVALID        (-1)
#define PM_EVENT_ON             0x0000
#define PM_EVENT_FREEZE         0x0001
#define PM_EVENT_SUSPEND        0x0002
#define PM_EVENT_HIBERNATE      0x0004
#define PM_EVENT_QUIESCE        0x0008
#define PM_EVENT_RESUME         0x0010
#define PM_EVENT_THAW           0x0020
#define PM_EVENT_RESTORE        0x0040
#define PM_EVENT_RECOVER        0x0080
#define PM_EVENT_USER           0x0100
#define PM_EVENT_REMOTE         0x0200
#define PM_EVENT_AUTO           0x0400

#define PM_EVENT_SLEEP          (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
#define PM_EVENT_USER_SUSPEND   (PM_EVENT_USER | PM_EVENT_SUSPEND)
#define PM_EVENT_USER_RESUME    (PM_EVENT_USER | PM_EVENT_RESUME)
#define PM_EVENT_REMOTE_RESUME  (PM_EVENT_REMOTE | PM_EVENT_RESUME)
#define PM_EVENT_AUTO_SUSPEND   (PM_EVENT_AUTO | PM_EVENT_SUSPEND)
#define PM_EVENT_AUTO_RESUME    (PM_EVENT_AUTO | PM_EVENT_RESUME)

#define PMSG_INVALID    ((struct pm_message){ .event = PM_EVENT_INVALID, })
#define PMSG_ON         ((struct pm_message){ .event = PM_EVENT_ON, })
#define PMSG_FREEZE     ((struct pm_message){ .event = PM_EVENT_FREEZE, })
#define PMSG_QUIESCE    ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
#define PMSG_SUSPEND    ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_HIBERNATE  ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
#define PMSG_RESUME     ((struct pm_message){ .event = PM_EVENT_RESUME, })
#define PMSG_THAW       ((struct pm_message){ .event = PM_EVENT_THAW, })
#define PMSG_RESTORE    ((struct pm_message){ .event = PM_EVENT_RESTORE, })
#define PMSG_RECOVER    ((struct pm_message){ .event = PM_EVENT_RECOVER, })
#define PMSG_USER_SUSPEND       ((struct pm_message) \
                                        { .event = PM_EVENT_USER_SUSPEND, })
#define PMSG_USER_RESUME        ((struct pm_message) \
                                        { .event = PM_EVENT_USER_RESUME, })
#define PMSG_REMOTE_RESUME      ((struct pm_message) \
                                        { .event = PM_EVENT_REMOTE_RESUME, })
#define PMSG_AUTO_SUSPEND       ((struct pm_message) \
                                        { .event = PM_EVENT_AUTO_SUSPEND, })
#define PMSG_AUTO_RESUME        ((struct pm_message) \
                                        { .event = PM_EVENT_AUTO_RESUME, })

#define PMSG_IS_AUTO(msg)       (((msg).event & PM_EVENT_AUTO) != 0)
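
/*
 * Illustrative sketch: subsystems whose suspend callbacks take a
 * pm_message_t (USB interface drivers, for example) can use PMSG_IS_AUTO()
 * to tell an automatic runtime suspend from a system-wide one.  The foo_*
 * helpers are assumptions for illustration.
 *
 *        static int foo_suspend(struct usb_interface *intf, pm_message_t msg)
 *        {
 *                if (PMSG_IS_AUTO(msg) && !foo_can_autosuspend(intf))
 *                        return -EBUSY;
 *                return foo_quiesce(intf);
 *        }
 */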

/**
 * Device run-time power management status.
 *
 * These status labels are used internally by the PM core to indicate the
 * current status of a device with respect to the PM core operations. They do
 * not reflect the actual power state of the device or its status as seen by
 * the driver.
 *
 * RPM_ACTIVE           Device is fully operational. Indicates that the device
 *                      bus type's ->runtime_resume() callback has completed
 *                      successfully.
 *
 * RPM_SUSPENDED        Device bus type's ->runtime_suspend() callback has
 *                      completed successfully. The device is regarded as
 *                      suspended.
 *
 * RPM_RESUMING         Device bus type's ->runtime_resume() callback is being
 *                      executed.
 *
 * RPM_SUSPENDING       Device bus type's ->runtime_suspend() callback is being
 *                      executed.
 */

enum rpm_status {
        RPM_ACTIVE = 0,
        RPM_RESUMING,
        RPM_SUSPENDED,
        RPM_SUSPENDING,
};

/**
 * Device run-time power management request types.
 *
 * RPM_REQ_NONE         Do nothing.
 *
 * RPM_REQ_IDLE         Run the device bus type's ->runtime_idle() callback
 *
 * RPM_REQ_SUSPEND      Run the device bus type's ->runtime_suspend() callback
 *
 * RPM_REQ_AUTOSUSPEND  Same as RPM_REQ_SUSPEND, but not until the device has
 *                      been inactive for as long as power.autosuspend_delay
 *
 * RPM_REQ_RESUME       Run the device bus type's ->runtime_resume() callback
 */

enum rpm_request {
        RPM_REQ_NONE = 0,
        RPM_REQ_IDLE,
        RPM_REQ_SUSPEND,
        RPM_REQ_AUTOSUSPEND,
        RPM_REQ_RESUME,
};

struct wakeup_source;

struct pm_domain_data {
        struct list_head list_node;
        struct device *dev;
};

struct pm_subsys_data {
        spinlock_t lock;
        unsigned int refcount;
#ifdef CONFIG_PM_CLK
        struct list_head clock_list;
#endif
#ifdef CONFIG_PM_GENERIC_DOMAINS
        struct pm_domain_data *domain_data;
#endif
};

struct dev_pm_info {
        pm_message_t power_state;
        unsigned int can_wakeup:1;
        unsigned int async_suspend:1;
        bool is_prepared:1;     /* Owned by the PM core */
        bool is_suspended:1;    /* Ditto */
        bool ignore_children:1;
        bool early_init:1;      /* Owned by the PM core */
        spinlock_t lock;
#ifdef CONFIG_PM_SLEEP
        struct list_head entry;
        struct completion completion;
        struct wakeup_source *wakeup;
        bool wakeup_path:1;
        bool syscore:1;
#else
        unsigned int should_wakeup:1;
#endif
#ifdef CONFIG_PM_RUNTIME
        struct timer_list suspend_timer;
        unsigned long timer_expires;
        struct work_struct work;
        wait_queue_head_t wait_queue;
        atomic_t usage_count;
        atomic_t child_count;
        unsigned int disable_depth:3;
        unsigned int idle_notification:1;
        unsigned int request_pending:1;
        unsigned int deferred_resume:1;
        unsigned int run_wake:1;
        unsigned int runtime_auto:1;
        unsigned int no_callbacks:1;
        unsigned int irq_safe:1;
        unsigned int use_autosuspend:1;
        unsigned int timer_autosuspends:1;
        unsigned int memalloc_noio:1;
        enum rpm_request request;
        enum rpm_status runtime_status;
        int runtime_error;
        int autosuspend_delay;
        unsigned long last_busy;
        unsigned long active_jiffies;
        unsigned long suspended_jiffies;
        unsigned long accounting_timestamp;
#endif
        struct pm_subsys_data *subsys_data;     /* Owned by the subsystem. */
        struct dev_pm_qos *qos;
};

extern void update_pm_runtime_accounting(struct device *dev);
extern int dev_pm_get_subsys_data(struct device *dev);
extern int dev_pm_put_subsys_data(struct device *dev);

/*
 * Power domains provide callbacks that are executed during system suspend,
 * hibernation, system resume and during runtime PM transitions along with
 * subsystem-level and driver-level callbacks.
 */
struct dev_pm_domain {
        struct dev_pm_ops ops;
};
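
/*
 * Illustrative sketch, assuming hypothetical "acme" domain callbacks:
 * platform glue can wrap devices in a PM domain whose ops the PM core
 * consults in preference to the bus type, class and device type callbacks.
 *
 *        static struct dev_pm_domain acme_pm_domain = {
 *                .ops = {
 *                        SET_RUNTIME_PM_OPS(acme_runtime_suspend,
 *                                           acme_runtime_resume, NULL)
 *                        SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend,
 *                                                pm_generic_resume)
 *                },
 *        };
 *
 * with dev->pm_domain pointed at acme_pm_domain before the device is put
 * into use.
 */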

/*
 * The PM_EVENT_ messages are also used by drivers implementing the legacy
 * suspend framework, based on the ->suspend() and ->resume() callbacks common
 * for suspend and hibernation transitions, according to the rules below.
 */

/* Necessary, because several drivers use PM_EVENT_PRETHAW */
#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE

/*
 * One transition is triggered by resume(), after a suspend() call; the
 * message is implicit:
 *
 * ON           Driver starts working again, responding to hardware events
 *              and software requests. The hardware may have gone through
 *              a power-off reset, or it may have maintained state from the
 *              previous suspend() which the driver will rely on while
 *              resuming. On most platforms, there are no restrictions on
 *              availability of resources like clocks during resume().
 *
 * Other transitions are triggered by messages sent using suspend(). All
 * these transitions quiesce the driver, so that I/O queues are inactive.
 * That commonly entails turning off IRQs and DMA; there may be rules
 * about how to quiesce that are specific to the bus or the device's type.
 * (For example, network drivers mark the link state.) Other details may
 * differ according to the message:
 *
 * SUSPEND      Quiesce, enter a low power device state appropriate for
 *              the upcoming system state (such as PCI_D3hot), and enable
 *              wakeup events as appropriate.
 *
 * HIBERNATE    Enter a low power device state appropriate for the hibernation
 *              state (eg. ACPI S4) and enable wakeup events as appropriate.
 *
 * FREEZE       Quiesce operations so that a consistent image can be saved;
 *              but do NOT otherwise enter a low power device state, and do
 *              NOT emit system wakeup events.
 *
 * PRETHAW      Quiesce as if for FREEZE; additionally, prepare for restoring
 *              the system from a snapshot taken after an earlier FREEZE.
 *              Some drivers will need to reset their hardware state instead
 *              of preserving it, to ensure that it's never mistaken for the
 *              state which that earlier snapshot had set up.
 *
 * A minimally power-aware driver treats all messages as SUSPEND, fully
 * reinitializes its device during resume() -- whether or not it was reset
 * during the suspend/resume cycle -- and can't issue wakeup events.
 *
 * More power-aware drivers may also use low power states at runtime as
 * well as during system sleep states like PM_SUSPEND_STANDBY. They may
 * be able to use wakeup events to exit from runtime low-power states,
 * or from system low-power states such as standby or suspend-to-RAM.
 */
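
/*
 * Illustrative sketch, assuming hypothetical foo_* helpers: a driver using
 * the legacy framework typically switches on the message it receives in its
 * ->suspend() callback.
 *
 *        static int foo_suspend(struct device *dev, pm_message_t msg)
 *        {
 *                foo_quiesce(dev);
 *                switch (msg.event) {
 *                case PM_EVENT_SUSPEND:
 *                case PM_EVENT_HIBERNATE:
 *                        foo_enable_wakeup(dev);
 *                        foo_enter_low_power(dev);
 *                        break;
 *                case PM_EVENT_FREEZE:
 *                case PM_EVENT_PRETHAW:
 *                        break;
 *                }
 *                return 0;
 *        }
 *
 * For FREEZE and PRETHAW the driver deliberately does nothing beyond
 * quiescing: no low power state, no wakeup events, per the rules above.
 */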

#ifdef CONFIG_PM_SLEEP
extern void device_pm_lock(void);
extern void dpm_resume_start(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
extern void dpm_resume(pm_message_t state);
extern void dpm_complete(pm_message_t state);

extern void device_pm_unlock(void);
extern int dpm_suspend_end(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
extern int dpm_prepare(pm_message_t state);

extern void __suspend_report_result(const char *function, void *fn, int ret);

#define suspend_report_result(fn, ret) \
        do { \
                __suspend_report_result(__func__, fn, ret); \
        } while (0)

extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
extern void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *));

extern int pm_generic_prepare(struct device *dev);
extern int pm_generic_suspend_late(struct device *dev);
extern int pm_generic_suspend_noirq(struct device *dev);
extern int pm_generic_suspend(struct device *dev);
extern int pm_generic_resume_early(struct device *dev);
extern int pm_generic_resume_noirq(struct device *dev);
extern int pm_generic_resume(struct device *dev);
extern int pm_generic_freeze_noirq(struct device *dev);
extern int pm_generic_freeze_late(struct device *dev);
extern int pm_generic_freeze(struct device *dev);
extern int pm_generic_thaw_noirq(struct device *dev);
extern int pm_generic_thaw_early(struct device *dev);
extern int pm_generic_thaw(struct device *dev);
extern int pm_generic_restore_noirq(struct device *dev);
extern int pm_generic_restore_early(struct device *dev);
extern int pm_generic_restore(struct device *dev);
extern int pm_generic_poweroff_noirq(struct device *dev);
extern int pm_generic_poweroff_late(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);

#else /* !CONFIG_PM_SLEEP */

#define device_pm_lock() do {} while (0)
#define device_pm_unlock() do {} while (0)

static inline int dpm_suspend_start(pm_message_t state)
{
        return 0;
}

#define suspend_report_result(fn, ret) do {} while (0)

static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
{
        return 0;
}

static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
}

#define pm_generic_prepare      NULL
#define pm_generic_suspend      NULL
#define pm_generic_resume       NULL
#define pm_generic_freeze       NULL
#define pm_generic_thaw         NULL
#define pm_generic_restore      NULL
#define pm_generic_poweroff     NULL
#define pm_generic_complete     NULL
#endif /* !CONFIG_PM_SLEEP */

/* How to reorder dpm_list after device_move() */
enum dpm_order {
        DPM_ORDER_NONE,
        DPM_ORDER_DEV_AFTER_PARENT,
        DPM_ORDER_PARENT_BEFORE_DEV,
        DPM_ORDER_DEV_LAST,
};

#endif /* _LINUX_PM_H */
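
/*
 * Illustrative sketch (simplified, error handling omitted): the system
 * suspend core strings the dpm_* phases declared above together roughly in
 * this order, with the low-level entry into the sleep state happening
 * between the two suspend calls and the two resume calls.
 *
 *        error = dpm_suspend_start(PMSG_SUSPEND);     prepare + suspend
 *        error = dpm_suspend_end(PMSG_SUSPEND);       suspend_late + suspend_noirq
 *
 *        dpm_resume_start(PMSG_RESUME);               resume_noirq + resume_early
 *        dpm_resume_end(PMSG_RESUME);                 resume + complete
 */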