/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/suspend.h>
#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/compiler.h>

struct swsusp_info {
	struct new_utsname uts;
	u32 version_code;
	unsigned long num_physpages;
	int cpus;
	unsigned long image_pages;
	unsigned long pages;
	unsigned long size;
} __aligned(PAGE_SIZE);

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __init hibernate_reserved_size_init(void);
extern void __init hibernate_image_size_init(void);

#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE	(sizeof(struct new_utsname) + 4)

extern int arch_hibernation_header_save(void *addr, unsigned int max_size);
extern int arch_hibernation_header_restore(void *addr);

static inline int init_header_complete(struct swsusp_info *info)
{
	return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}

static inline char *check_image_kernel(struct swsusp_info *info)
{
	return arch_hibernation_header_restore(info) ?
			"architecture specific data" : NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

extern int hibernate_resume_nonboot_cpu_disable(void);

/*
 * Keep some memory free so that I/O operations can succeed without paging
 * [Might this be more than 4 MB?]
 */
#define PAGES_FOR_IO	((4096 * 1024) >> PAGE_SHIFT)

/*
 * Keep 1 MB of memory free so that device drivers can allocate some pages in
 * their .suspend() routines without breaking the suspend to disk.
 */
#define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)
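
/*
 * For example, with the common 4 KiB page size (PAGE_SHIFT == 12) the two
 * definitions above work out to 1024 pages for PAGES_FOR_IO and 256 pages
 * for SPARE_PAGES.
 */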

asmlinkage int swsusp_save(void);

/* kernel/power/hibernate.c */
extern bool freezer_test_done;

extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);

#ifdef CONFIG_STRICT_KERNEL_RWX
/* kernel/power/snapshot.c */
extern void enable_restore_image_protection(void);
#else
static inline void enable_restore_image_protection(void) {}
#endif /* CONFIG_STRICT_KERNEL_RWX */

#else /* !CONFIG_HIBERNATION */

static inline void hibernate_reserved_size_init(void) {}
static inline void hibernate_image_size_init(void) {}
#endif /* !CONFIG_HIBERNATION */

extern int pfn_is_nosave(unsigned long);

#define power_attr(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr = {				\
		.name = __stringify(_name),	\
		.mode = 0644,			\
	},					\
	.show = _name##_show,			\
	.store = _name##_store,			\
}

#define power_attr_ro(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr = {				\
		.name = __stringify(_name),	\
		.mode = S_IRUGO,		\
	},					\
	.show = _name##_show,			\
}
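
/*
 * Usage sketch for the two macros above (the attribute name "foo" is purely
 * illustrative and is not defined anywhere in the power code): the caller
 * supplies foo_show() and, for the read-write variant, foo_store() with the
 * usual kobj_attribute prototypes and then writes
 *
 *	power_attr(foo);
 *
 * which defines foo_attr with mode 0644, ready to be added to the /sys/power
 * kobject.  power_attr_ro(foo) does the same for a read-only (S_IRUGO)
 * attribute that only needs foo_show().
 */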

/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
extern unsigned long reserved_size;
extern int in_suspend;
extern dev_t swsusp_resume_device;
extern sector_t swsusp_resume_block;

extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);

extern void clear_free_pages(void);

/**
 * Auxiliary structure used for reading the snapshot image data and
 * metadata from and writing them to the list of page backup entries
 * (PBEs), which is the main data structure of swsusp.
 *
 * Using struct snapshot_handle we can transfer the image, including its
 * metadata, as a continuous sequence of bytes with the help of
 * snapshot_read_next() and snapshot_write_next().
 *
 * The code that writes the image to storage or transfers it to user space
 * is required to use snapshot_read_next() for this purpose and must not
 * make any assumptions regarding the internal structure of the image.
 * Similarly, the code that reads the image from storage or transfers it
 * from user space is required to use snapshot_write_next().  (A typical
 * write-out loop is sketched below the data_of() macro.)
 *
 * This should allow us to change the internal structure of the image
 * in the future with considerably less effort.
 */

struct snapshot_handle {
	unsigned int cur;	/* number of the block of PAGE_SIZE bytes the
				 * next operation will refer to (i.e. current)
				 */
	void *buffer;		/* address of the block to read from
				 * or write to
				 */
	int sync_read;		/* Set to one to notify the caller of
				 * snapshot_write_next() that it may
				 * need to call wait_on_bio_chain()
				 */
};

/*
 * This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to read or write
 * data after the function returns.
 */
#define data_of(handle)	((handle).buffer)
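
/*
 * Minimal sketch of a write-out loop built on this interface (illustrative
 * only; the real users live in kernel/power/swap.c and kernel/power/user.c,
 * and store_page() here stands for whatever routine actually saves one page
 * of data):
 *
 *	struct snapshot_handle handle = {};
 *	int ret;
 *
 *	while ((ret = snapshot_read_next(&handle)) > 0) {
 *		ret = store_page(data_of(handle));
 *		if (ret)
 *			break;
 *	}
 *
 * A return value of 0 from snapshot_read_next() means the whole image has
 * been transferred; a negative value is an error code.  Loading an image
 * works the same way with snapshot_write_next(): the caller fills
 * data_of(handle) before each subsequent call and finishes with
 * snapshot_write_finalize().
 */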

extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);

/* If unset, the snapshot device cannot be opened. */
extern atomic_t snapshot_device_available;

extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);

/*
 * Flags that can be passed from the hibernating kernel to the "boot" kernel
 * in the image header.
 */
#define SF_PLATFORM_MODE	1
#define SF_NOCOMPRESS_MODE	2
#define SF_CRC32_MODE		4
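
/*
 * These are independent bit flags and may be combined; a header carrying
 * (SF_PLATFORM_MODE | SF_CRC32_MODE), for example, tells the boot kernel
 * that the image was written with platform mode in use and carries CRC32
 * checksums to be verified on load.
 */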

/* kernel/power/hibernate.c */
extern int swsusp_check(void);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
extern void swsusp_close(fmode_t);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
#endif

struct timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(ktime_t, ktime_t, unsigned int, char *);

#ifdef CONFIG_SUSPEND
/* kernel/power/suspend.c */
extern const char * const pm_labels[];
extern const char *pm_states[];
extern const char *mem_sleep_states[];

extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
#define mem_sleep_current	PM_SUSPEND_ON

static inline int suspend_devices_and_enter(suspend_state_t state)
{
	return -ENOSYS;
}
#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */

#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int __pm_notifier_call_chain(unsigned long val, int nr_to_call,
				    int *nr_calls);
extern int pm_notifier_call_chain(unsigned long val);
#endif

#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif

/*
 * Suspend test levels
 */
enum {
	/* keep first */
	TEST_NONE,
	TEST_CORE,
	TEST_CPUS,
	TEST_PLATFORM,
	TEST_DEVICES,
	TEST_FREEZER,
	/* keep last */
	__TEST_AFTER_LAST
};

#define TEST_FIRST	TEST_NONE
#define TEST_MAX	(__TEST_AFTER_LAST - 1)

#ifdef CONFIG_PM_SLEEP_DEBUG
extern int pm_test_level;
#else
#define pm_test_level	(TEST_NONE)
#endif

#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
	int error;

	error = freeze_processes();
	/*
	 * freeze_processes() automatically thaws every task if freezing
	 * fails. So we need not do anything extra upon error.
	 */
	if (error)
		return error;

	error = freeze_kernel_threads();
	/*
	 * freeze_kernel_threads() thaws only kernel threads upon freezing
	 * failure. So we have to thaw the userspace tasks ourselves.
	 */
	if (error)
		thaw_processes();

	return error;
}

static inline void suspend_thaw_processes(void)
{
	thaw_processes();
}
#else
static inline int suspend_freeze_processes(void)
{
	return 0;
}

static inline void suspend_thaw_processes(void)
{
}
#endif

#ifdef CONFIG_PM_AUTOSLEEP

/* kernel/power/autosleep.c */
extern int pm_autosleep_init(void);
extern int pm_autosleep_lock(void);
extern void pm_autosleep_unlock(void);
extern suspend_state_t pm_autosleep_state(void);
extern int pm_autosleep_set_state(suspend_state_t state);

#else /* !CONFIG_PM_AUTOSLEEP */

static inline int pm_autosleep_init(void) { return 0; }
static inline int pm_autosleep_lock(void) { return 0; }
static inline void pm_autosleep_unlock(void) {}
static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }

#endif /* !CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_PM_WAKELOCKS

/* kernel/power/wakelock.c */
extern ssize_t pm_show_wakelocks(char *buf, bool show_active);
extern int pm_wake_lock(const char *buf);
extern int pm_wake_unlock(const char *buf);

#endif /* CONFIG_PM_WAKELOCKS */