#ifndef _LINUX_SUSPEND_H
#define _LINUX_SUSPEND_H

#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/mm.h>
#include <linux/freezer.h>
#include <asm/errno.h>

#ifdef CONFIG_VT
extern void pm_set_vt_switch(int);
#else
static inline void pm_set_vt_switch(int do_switch)
{
}
#endif

#ifdef CONFIG_VT_CONSOLE_SLEEP
extern int pm_prepare_console(void);
extern void pm_restore_console(void);
#else
static inline int pm_prepare_console(void)
{
        return 0;
}

static inline void pm_restore_console(void)
{
}
#endif

typedef int __bitwise suspend_state_t;

#define PM_SUSPEND_ON      ((__force suspend_state_t) 0)
#define PM_SUSPEND_FREEZE  ((__force suspend_state_t) 1)
#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 2)
#define PM_SUSPEND_MEM     ((__force suspend_state_t) 3)
#define PM_SUSPEND_MIN     PM_SUSPEND_FREEZE
#define PM_SUSPEND_MAX     ((__force suspend_state_t) 4)

enum suspend_stat_step {
        SUSPEND_FREEZE = 1,
        SUSPEND_PREPARE,
        SUSPEND_SUSPEND,
        SUSPEND_SUSPEND_LATE,
        SUSPEND_SUSPEND_NOIRQ,
        SUSPEND_RESUME_NOIRQ,
        SUSPEND_RESUME_EARLY,
        SUSPEND_RESUME
};

struct suspend_stats {
        int success;
        int fail;
        int failed_freeze;
        int failed_prepare;
        int failed_suspend;
        int failed_suspend_late;
        int failed_suspend_noirq;
        int failed_resume;
        int failed_resume_early;
        int failed_resume_noirq;
#define REC_FAILED_NUM 2
        int last_failed_dev;
        char failed_devs[REC_FAILED_NUM][40];
        int last_failed_errno;
        int errno[REC_FAILED_NUM];
        int last_failed_step;
        enum suspend_stat_step failed_steps[REC_FAILED_NUM];
};

extern struct suspend_stats suspend_stats;

static inline void dpm_save_failed_dev(const char *name)
{
        strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
                name,
                sizeof(suspend_stats.failed_devs[0]));
        suspend_stats.last_failed_dev++;
        suspend_stats.last_failed_dev %= REC_FAILED_NUM;
}

static inline void dpm_save_failed_errno(int err)
{
        suspend_stats.errno[suspend_stats.last_failed_errno] = err;
        suspend_stats.last_failed_errno++;
        suspend_stats.last_failed_errno %= REC_FAILED_NUM;
}

static inline void dpm_save_failed_step(enum suspend_stat_step step)
{
        suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
        suspend_stats.last_failed_step++;
        suspend_stats.last_failed_step %= REC_FAILED_NUM;
}
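
/*
 * Example (illustrative sketch, not part of this header): the PM core records
 * a device suspend failure in the suspend_stats ring buffers above roughly
 * along these lines; the surrounding error-handling context is hypothetical.
 *
 *      if (error) {
 *              suspend_stats.failed_suspend++;
 *              dpm_save_failed_errno(error);
 *              dpm_save_failed_dev(dev_name(dev));
 *              dpm_save_failed_step(SUSPEND_SUSPEND);
 *      }
 *
 * Only the last REC_FAILED_NUM (i.e. 2) entries are kept: each last_failed_*
 * index is incremented and then wrapped modulo REC_FAILED_NUM.
 */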

/**
 * struct platform_suspend_ops - Callbacks for managing platform dependent
 *      system sleep states.
 *
 * @valid: Callback to determine if given system sleep state is supported by
 *      the platform.
 *      Valid (ie. supported) states are advertised in /sys/power/state.  Note
 *      that it still may be impossible to enter given system sleep state if the
 *      conditions aren't right.
 *      There is the %suspend_valid_only_mem function available that can be
 *      assigned to this if the platform only supports mem sleep.
 *
 * @begin: Initialise a transition to given system sleep state.
 *      @begin() is executed right prior to suspending devices.  The information
 *      conveyed to the platform code by @begin() should be disregarded by it as
 *      soon as @end() is executed.  If @begin() fails (ie. returns nonzero),
 *      @prepare(), @enter() and @finish() will not be called by the PM core.
 *      This callback is optional.  However, if it is implemented, the argument
 *      passed to @enter() is redundant and should be ignored.
 *
 * @prepare: Prepare the platform for entering the system sleep state indicated
 *      by @begin().
 *      @prepare() is called right after devices have been suspended (ie. the
 *      appropriate .suspend() method has been executed for each device) and
 *      before device drivers' late suspend callbacks are executed.  It returns
 *      0 on success or a negative error code otherwise, in which case the
 *      system cannot enter the desired sleep state (@prepare_late(), @enter(),
 *      and @wake() will not be called in that case).
 *
 * @prepare_late: Finish preparing the platform for entering the system sleep
 *      state indicated by @begin().
 *      @prepare_late is called before disabling nonboot CPUs and after
 *      device drivers' late suspend callbacks have been executed.  It returns
 *      0 on success or a negative error code otherwise, in which case the
 *      system cannot enter the desired sleep state (@enter() will not be
 *      executed).
 *
 * @enter: Enter the system sleep state indicated by @begin() or represented by
 *      the argument if @begin() is not implemented.
 *      This callback is mandatory.  It returns 0 on success or a negative
 *      error code otherwise, in which case the system cannot enter the desired
 *      sleep state.
 *
 * @wake: Called when the system has just left a sleep state, right after
 *      the nonboot CPUs have been enabled and before device drivers' early
 *      resume callbacks are executed.
 *      This callback is optional, but should be implemented by the platforms
 *      that implement @prepare_late().  If implemented, it is always called
 *      after @prepare_late and @enter(), even if one of them fails.
 *
 * @finish: Finish wake-up of the platform.
 *      @finish is called right prior to calling device drivers' regular resume
 *      callbacks.
 *      This callback is optional, but should be implemented by the platforms
 *      that implement @prepare().  If implemented, it is always called after
 *      @enter() and @wake(), even if any of them fails.  It is executed after
 *      a failing @prepare.
 *
 * @suspend_again: Returns whether the system should suspend again (true) or
 *      not (false).  If the platform wants to poll sensors or execute some
 *      code while suspended, without involving userspace and most devices,
 *      the suspend_again callback is the place to do it, assuming that a
 *      periodic wakeup or alarm wakeup has already been set up.  This allows
 *      code to run while the system stays suspended from the point of view of
 *      userland and devices.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *      the platform that the system has returned to the working state or
 *      the transition to the sleep state has been aborted.
 *      This callback is optional, but should be implemented by the platforms
 *      that implement @begin().  Accordingly, platforms implementing @begin()
 *      should also provide an @end() which cleans up transitions aborted before
 *      @enter().
 *
 * @recover: Recover the platform from a suspend failure.
 *      Called by the PM core if the suspending of devices fails.
 *      This callback is optional and should only be implemented by platforms
 *      which require special recovery actions in that situation.
 */
struct platform_suspend_ops {
        int (*valid)(suspend_state_t state);
        int (*begin)(suspend_state_t state);
        int (*prepare)(void);
        int (*prepare_late)(void);
        int (*enter)(suspend_state_t state);
        void (*wake)(void);
        void (*finish)(void);
        bool (*suspend_again)(void);
        void (*end)(void);
        void (*recover)(void);
};

#ifdef CONFIG_SUSPEND
/**
 * suspend_set_ops - set platform dependent suspend operations
 * @ops: The new suspend operations to set.
 */
extern void suspend_set_ops(const struct platform_suspend_ops *ops);
extern int suspend_valid_only_mem(suspend_state_t state);
extern void freeze_wake(void);

/**
 * arch_suspend_disable_irqs - disable IRQs for suspend
 *
 * Disables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_disable_irqs(void);

/**
 * arch_suspend_enable_irqs - enable IRQs after suspend
 *
 * Enables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_enable_irqs(void);

extern int pm_suspend(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
#define suspend_valid_only_mem NULL

static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
static inline void freeze_wake(void) {}
#endif /* !CONFIG_SUSPEND */
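
/*
 * Example (illustrative sketch, not part of this header): a platform that only
 * supports the "mem" sleep state would typically fill in a platform_suspend_ops
 * instance, reuse suspend_valid_only_mem for @valid as suggested above, and
 * register it with suspend_set_ops() during platform init.  The "foo_" names
 * are hypothetical.
 *
 *      static int foo_suspend_enter(suspend_state_t state)
 *      {
 *              // put the SoC into its deepest retention state (hypothetical)
 *              return 0;
 *      }
 *
 *      static const struct platform_suspend_ops foo_suspend_ops = {
 *              .valid = suspend_valid_only_mem,
 *              .enter = foo_suspend_enter,
 *      };
 *
 *      static int __init foo_pm_init(void)
 *      {
 *              suspend_set_ops(&foo_suspend_ops);
 *              return 0;
 *      }
 */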

/* struct pbe is used for creating lists of pages that should be restored
 * atomically during the resume from disk, because the page frames they have
 * occupied before the suspend are in use.
 */
struct pbe {
        void *address;          /* address of the copy */
        void *orig_address;     /* original address of a page */
        struct pbe *next;
};

/* mm/page_alloc.c */
extern void mark_free_pages(struct zone *zone);

/**
 * struct platform_hibernation_ops - hibernation platform support
 *
 * The methods in this structure allow a platform to carry out special
 * operations required by it during a hibernation transition.
 *
 * All the methods below, except for @recover(), must be implemented.
 *
 * @begin: Tell the platform driver that we're starting hibernation.
 *      Called right after shrinking memory and before freezing devices.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *      the platform that the system has returned to the working state.
 *
 * @pre_snapshot: Prepare the platform for creating the hibernation image.
 *      Called right after devices have been frozen and before the nonboot
 *      CPUs are disabled (runs with IRQs on).
 *
 * @finish: Restore the previous state of the platform after the hibernation
 *      image has been created *or* put the platform into the normal operation
 *      mode after the hibernation (the same method is executed in both cases).
 *      Called right after the nonboot CPUs have been enabled and before
 *      thawing devices (runs with IRQs on).
 *
 * @prepare: Prepare the platform for entering the low power state.
 *      Called right after the hibernation image has been saved and before
 *      devices are prepared for entering the low power state.
 *
 * @enter: Put the system into the low power state after the hibernation image
 *      has been saved to disk.
 *      Called after the nonboot CPUs have been disabled and all of the low
 *      level devices have been shut down (runs with IRQs off).
 *
 * @leave: Perform the first stage of the cleanup after the system sleep state
 *      indicated by @set_target() has been left.
 *      Called right after the control has been passed from the boot kernel to
 *      the image kernel, before the nonboot CPUs are enabled and before devices
 *      are resumed.  Executed with interrupts disabled.
 *
 * @pre_restore: Prepare system for the restoration from a hibernation image.
 *      Called right after devices have been frozen and before the nonboot
 *      CPUs are disabled (runs with IRQs on).
 *
 * @restore_cleanup: Clean up after a failing image restoration.
 *      Called right after the nonboot CPUs have been enabled and before
 *      thawing devices (runs with IRQs on).
 *
 * @recover: Recover the platform from a failure to suspend devices.
 *      Called by the PM core if the suspending of devices during hibernation
 *      fails.  This callback is optional and should only be implemented by
 *      platforms which require special recovery actions in that situation.
 */
struct platform_hibernation_ops {
        int (*begin)(void);
        void (*end)(void);
        int (*pre_snapshot)(void);
        void (*finish)(void);
        int (*prepare)(void);
        int (*enter)(void);
        void (*leave)(void);
        int (*pre_restore)(void);
        void (*restore_cleanup)(void);
        void (*recover)(void);
};

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
static inline void __init register_nosave_region(unsigned long b, unsigned long e)
{
        __register_nosave_region(b, e, 0);
}
static inline void __init register_nosave_region_late(unsigned long b, unsigned long e)
{
        __register_nosave_region(b, e, 1);
}
extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
extern unsigned long get_safe_page(gfp_t gfp_mask);

extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
extern int hibernate(void);
extern bool system_entering_hibernation(void);
#else /* CONFIG_HIBERNATION */
static inline void register_nosave_region(unsigned long b, unsigned long e) {}
static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}

static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
static inline int hibernate(void) { return -ENOSYS; }
static inline bool system_entering_hibernation(void) { return false; }
#endif /* CONFIG_HIBERNATION */
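
/*
 * Example (illustrative sketch, not part of this header): platform code that
 * needs firmware involvement around hibernation would register its callbacks
 * with hibernation_set_ops().  Per the description above, every callback
 * except @recover must be provided; the "foo_" handlers are hypothetical.
 *
 *      static const struct platform_hibernation_ops foo_hibernation_ops = {
 *              .begin           = foo_hibernate_begin,
 *              .end             = foo_hibernate_end,
 *              .pre_snapshot    = foo_pre_snapshot,
 *              .finish          = foo_hibernate_finish,
 *              .prepare         = foo_hibernate_prepare,
 *              .enter           = foo_hibernate_enter,
 *              .leave           = foo_hibernate_leave,
 *              .pre_restore     = foo_pre_restore,
 *              .restore_cleanup = foo_restore_cleanup,
 *      };
 *
 *      // during platform init, only if the platform really supports it:
 *      //      hibernation_set_ops(&foo_hibernation_ops);
 */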

/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE  0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION     0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE      0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND         0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE      0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE         0x0006 /* Restore failed */

extern struct mutex pm_mutex;

#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
void restore_processor_state(void);

/* kernel/power/main.c */
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);

#define pm_notifier(fn, pri) {                                  \
        static struct notifier_block fn##_nb =                  \
                { .notifier_call = fn, .priority = pri };       \
        register_pm_notifier(&fn##_nb);                         \
}

/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;

extern bool pm_wakeup_pending(void);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
extern void pm_print_active_wakeup_sources(void);

static inline void lock_system_sleep(void)
{
        current->flags |= PF_FREEZER_SKIP;
        mutex_lock(&pm_mutex);
}

static inline void unlock_system_sleep(void)
{
        /*
         * Don't use freezer_count() because we don't want the call to
         * try_to_freeze() here.
         *
         * Reason:
         * Fundamentally, we just don't need it, because the freezing condition
         * doesn't come into effect until we release the pm_mutex lock,
         * since the freezer always works with pm_mutex held.
         *
         * More importantly, in the case of hibernation,
         * unlock_system_sleep() gets called in snapshot_read() and
         * snapshot_write() when the freezing condition is still in effect.
         * Which means, if we used try_to_freeze() here, it would make them
         * enter the refrigerator, thus causing hibernation to lock up.
         */
        current->flags &= ~PF_FREEZER_SKIP;
        mutex_unlock(&pm_mutex);
}

#else /* !CONFIG_PM_SLEEP */

static inline int register_pm_notifier(struct notifier_block *nb)
{
        return 0;
}

static inline int unregister_pm_notifier(struct notifier_block *nb)
{
        return 0;
}

#define pm_notifier(fn, pri) do { (void)(fn); } while (0)

static inline bool pm_wakeup_pending(void) { return false; }

static inline void lock_system_sleep(void) {}
static inline void unlock_system_sleep(void) {}

#endif /* !CONFIG_PM_SLEEP */
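
/*
 * Example (illustrative sketch, not part of this header): a driver that must
 * react to system sleep transitions can register a PM notifier and key off the
 * events defined above.  The "foo_" names are hypothetical.
 *
 *      static int foo_pm_event(struct notifier_block *nb, unsigned long event,
 *                              void *unused)
 *      {
 *              switch (event) {
 *              case PM_SUSPEND_PREPARE:
 *                      // quiesce before devices are suspended
 *                      return NOTIFY_OK;
 *              case PM_POST_SUSPEND:
 *                      // resume normal operation
 *                      return NOTIFY_OK;
 *              default:
 *                      return NOTIFY_DONE;
 *              }
 *      }
 *
 *      static struct notifier_block foo_pm_nb = {
 *              .notifier_call = foo_pm_event,
 *      };
 *
 *      // in the driver's init path:
 *      //      register_pm_notifier(&foo_pm_nb);
 */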

#ifdef CONFIG_PM_SLEEP_DEBUG
extern bool pm_print_times_enabled;
#else
#define pm_print_times_enabled (false)
#endif

#ifdef CONFIG_PM_AUTOSLEEP

/* kernel/power/autosleep.c */
void queue_up_suspend_work(void);

#else /* !CONFIG_PM_AUTOSLEEP */

static inline void queue_up_suspend_work(void) {}

#endif /* !CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
/*
 * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture
 * to save/restore additional information to/from the array of page
 * frame numbers in the hibernation image. For s390 this is used to
 * save and restore the storage key for each page that is included
 * in the hibernation image.
 */
unsigned long page_key_additional_pages(unsigned long pages);
int page_key_alloc(unsigned long pages);
void page_key_free(void);
void page_key_read(unsigned long *pfn);
void page_key_memorize(unsigned long *pfn);
void page_key_write(void *address);

#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */

static inline unsigned long page_key_additional_pages(unsigned long pages)
{
        return 0;
}

static inline int page_key_alloc(unsigned long pages)
{
        return 0;
}

static inline void page_key_free(void) {}
static inline void page_key_read(unsigned long *pfn) {}
static inline void page_key_memorize(unsigned long *pfn) {}
static inline void page_key_write(void *address) {}

#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */

#endif /* _LINUX_SUSPEND_H */