1 /* SPDX-License-Identifier: GPL-2.0 */
2 #include <linux/suspend.h>
3 #include <linux/suspend_ioctls.h>
4 #include <linux/utsname.h>
5 #include <linux/freezer.h>
6 #include <linux/compiler.h>
7 #include <linux/cpu.h>
8 #include <linux/cpuidle.h>
9 #include <linux/crypto.h>
10
/*
 * Header describing a hibernation image; page-aligned so it can occupy
 * the first page of the image on its own.
 */
struct swsusp_info {
	struct new_utsname	uts;		/* uname data of the image kernel */
	u32			version_code;	/* kernel version of the image kernel */
	unsigned long		num_physpages;	/* pages of physical memory present */
	int			cpus;		/* number of CPUs */
	unsigned long		image_pages;	/* pages copied into the image */
	unsigned long		pages;		/* total pages the image occupies */
	unsigned long		size;		/* image size in bytes */
} __aligned(PAGE_SIZE);
20
#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
/* Set up the default reserved/image size tunables at boot. */
extern void __init hibernate_reserved_size_init(void);
extern void __init hibernate_image_size_init(void);

#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE	(sizeof(struct new_utsname) + 4)
29
/*
 * Let the architecture save its specific data into the image header
 * @info (at most MAX_ARCH_HEADER_SIZE bytes).  Returns the arch
 * helper's result.
 */
static inline int init_header_complete(struct swsusp_info *info)
{
	return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}
34
/*
 * Verify the architecture specific part of the image header @info.
 * Returns NULL when the header is acceptable, or the name of the
 * mismatching component for use in an error message otherwise.
 */
static inline const char *check_image_kernel(struct swsusp_info *info)
{
	return arch_hibernation_header_restore(info) ?
			"architecture specific data" : NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
41
/*
 * Keep some memory free so that I/O operations can succeed without paging
 * [Might this be more than 4 MB?]
 */
#define PAGES_FOR_IO	((4096 * 1024) >> PAGE_SHIFT)

/*
 * Keep 1 MB of memory free so that device drivers can allocate some pages in
 * their .suspend() routines without breaking the suspend to disk.
 */
#define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)

/* Arch-provided entry point that performs the atomic snapshot copy. */
asmlinkage int swsusp_save(void);

/* kernel/power/hibernate.c */
extern bool freezer_test_done;
/* Name of the crypto compressor used for the hibernation image. */
extern char hib_comp_algo[CRYPTO_MAX_ALG_NAME];

/* kernel/power/swap.c */
extern unsigned int swsusp_header_flags;

/* Major phases of the hibernate/restore sequence. */
extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);
#ifdef CONFIG_STRICT_KERNEL_RWX
/* kernel/power/snapshot.c */
extern void enable_restore_image_protection(void);
#else
/* No-op when strict kernel RWX protection is not configured. */
static inline void enable_restore_image_protection(void) {}
#endif /* CONFIG_STRICT_KERNEL_RWX */

extern bool hibernation_in_progress(void);
75
76 #else /* !CONFIG_HIBERNATION */
77
/* Hibernation is not configured in: the init hooks do nothing and a
 * hibernation transition can never be in progress.
 */
static inline void hibernate_reserved_size_init(void)
{
}

static inline void hibernate_image_size_init(void)
{
}

static inline bool hibernation_in_progress(void)
{
	return false;
}
82 #endif /* !CONFIG_HIBERNATION */
83
/*
 * Define a read-write (0644) sysfs attribute <_name>_attr backed by
 * <_name>_show() and <_name>_store().
 */
#define power_attr(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = 0644,			\
	},					\
	.show	= _name##_show,			\
	.store	= _name##_store,		\
}

/*
 * Define a read-only (0444) sysfs attribute <_name>_attr backed by
 * <_name>_show() only.
 */
#define power_attr_ro(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = S_IRUGO,		\
	},					\
	.show	= _name##_show,			\
}
102
/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
extern unsigned long reserved_size;
extern int in_suspend;
/* Device and sector holding the resume image (NOTE: semantics per swap.c). */
extern dev_t swsusp_resume_device;
extern sector_t swsusp_resume_block;

/* Memory bitmap management for the snapshot machinery. */
extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);

extern void clear_or_poison_free_pages(void);
116
/**
 * Auxiliary structure used for reading the snapshot image data and
 * metadata from and writing them to the list of page backup entries
 * (PBEs) which is the main data structure of swsusp.
 *
 * Using struct snapshot_handle we can transfer the image, including its
 * metadata, as a continuous sequence of bytes with the help of
 * snapshot_read_next() and snapshot_write_next().
 *
 * The code that writes the image to storage or transfers it to
 * user space is required to use snapshot_read_next() for this
 * purpose and it should not make any assumptions regarding the internal
 * structure of the image.  Similarly, the code that reads the image from
 * storage or transfers it from user space is required to use
 * snapshot_write_next().
 *
 * This may allow us to change the internal structure of the image
 * in the future with considerably less effort.
 */

struct snapshot_handle {
	unsigned int cur;	/* number of the block of PAGE_SIZE bytes the
				 * next operation will refer to (ie. current)
				 */
	void *buffer;		/* address of the block to read from
				 * or write to
				 */
	int sync_read;		/* Set to one to notify the caller of
				 * snapshot_write_next() that it may
				 * need to call wait_on_bio_chain()
				 */
};
149
/*
 * This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns.
 */
#define data_of(handle)	((handle).buffer)

/* Snapshot image streaming interface (see the comment above). */
extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
int snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);

/* Exclusive reference on the hibernation mechanism. */
extern bool hibernate_acquire(void);
extern void hibernate_release(void);

/* Swap space management for the image (kernel/power/swap.c). */
extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);
169
/*
 * Flags that can be passed from the hibernating kernel to the "boot" kernel in
 * the image header.
 */
#define SF_COMPRESSION_ALG_LZO	0	/* dummy, details given below */
#define SF_PLATFORM_MODE	1
#define SF_NOCOMPRESS_MODE	2
#define SF_CRC32_MODE		4
#define SF_HW_SIG		8

/*
 * Bit to indicate the compression algorithm to be used (for LZ4).  The same
 * could be checked while saving/loading image to/from disk to use the
 * corresponding algorithms.
 *
 * By default, LZO compression is enabled if SF_CRC32_MODE is set.  Use
 * SF_COMPRESSION_ALG_LZ4 to override this behaviour and use LZ4.
 *
 * SF_CRC32_MODE, SF_COMPRESSION_ALG_LZO (dummy) -> Compression, LZO
 * SF_CRC32_MODE, SF_COMPRESSION_ALG_LZ4         -> Compression, LZ4
 */
#define SF_COMPRESSION_ALG_LZ4	16
192
/* kernel/power/hibernate.c */
/* @exclusive: presumably claims the resume device exclusively — confirm in swap.c. */
int swsusp_check(bool exclusive);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
void swsusp_close(void);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
#else
/* Nothing to unmark without CONFIG_SUSPEND; report success. */
static inline int swsusp_unmark(void)
{
	return 0;
}
203 #endif
204
struct __kernel_old_timeval;
/* kernel/power/swsusp.c */
/* NOTE(review): arguments look like (start, stop, nr_pages, msg) — confirm at definition. */
extern void swsusp_show_speed(ktime_t, ktime_t, unsigned int, char *);
208
#ifdef CONFIG_SUSPEND
/* kernel/power/suspend.c */
extern const char * const pm_labels[];
extern const char *pm_states[];
extern const char *mem_sleep_states[];

extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
#define mem_sleep_current	PM_SUSPEND_ON

/* Suspend is not configured in, so entering any state is unsupported. */
static inline int suspend_devices_and_enter(suspend_state_t state)
{
	return -ENOSYS;
}
#endif /* !CONFIG_SUSPEND */
224
#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
/* Suspend-time measurement hooks compile away without CONFIG_PM_TEST_SUSPEND. */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */
233
#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down);
extern int pm_notifier_call_chain(unsigned long val);
void pm_restrict_gfp_mask(void);
void pm_restore_gfp_mask(void);
#else
/* GFP-mask manipulation is only needed for sleep transitions. */
static inline void pm_restrict_gfp_mask(void) {}
static inline void pm_restore_gfp_mask(void) {}
#endif
244
#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
/* Without CONFIG_HIGHMEM there are no highmem pages to count or restore. */
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif
251
/*
 * Suspend test levels
 * NOTE(review): each level appears to name a point in the suspend sequence
 * at which a test run is cut short — confirm against suspend.c users.
 */
enum {
	/* keep first */
	TEST_NONE,
	TEST_CORE,
	TEST_CPUS,
	TEST_PLATFORM,
	TEST_DEVICES,
	TEST_FREEZER,
	/* keep last */
	__TEST_AFTER_LAST
};

#define TEST_FIRST	TEST_NONE
#define TEST_MAX	(__TEST_AFTER_LAST - 1)

#ifdef CONFIG_PM_SLEEP_DEBUG
extern int pm_test_level;
#else
#define pm_test_level	(TEST_NONE)
#endif
275
276 #ifdef CONFIG_SUSPEND_FREEZER
/*
 * Freeze user space first, then kernel threads.
 *
 * A failure of freeze_processes() needs no cleanup here, because it
 * automatically thaws every task it froze.  A failure of
 * freeze_kernel_threads(), however, only thaws kernel threads, so the
 * user space tasks frozen above must be thawed explicitly.
 */
static inline int suspend_freeze_processes(void)
{
	int error = freeze_processes();

	if (error)
		return error;

	error = freeze_kernel_threads();
	if (error)
		thaw_processes();

	return error;
}
299
/* Counterpart of suspend_freeze_processes(). */
static inline void suspend_thaw_processes(void)
{
	thaw_processes();
}
304 #else
/* Freezer disabled: freezing trivially succeeds and thawing does nothing. */
static inline int suspend_freeze_processes(void) { return 0; }

static inline void suspend_thaw_processes(void) {}
313 #endif
314
#ifdef CONFIG_PM_AUTOSLEEP

/* kernel/power/autosleep.c */
extern int pm_autosleep_init(void);
extern int pm_autosleep_lock(void);
extern void pm_autosleep_unlock(void);
extern suspend_state_t pm_autosleep_state(void);
extern int pm_autosleep_set_state(suspend_state_t state);

#else /* !CONFIG_PM_AUTOSLEEP */
325
pm_autosleep_init(void)326 static inline int pm_autosleep_init(void) { return 0; }
pm_autosleep_lock(void)327 static inline int pm_autosleep_lock(void) { return 0; }
pm_autosleep_unlock(void)328 static inline void pm_autosleep_unlock(void) {}
pm_autosleep_state(void)329 static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }
330
331 #endif /* !CONFIG_PM_AUTOSLEEP */
332
#ifdef CONFIG_PM_WAKELOCKS

/* kernel/power/wakelock.c */
extern ssize_t pm_show_wakelocks(char *buf, bool show_active);
extern int pm_wake_lock(const char *buf);
extern int pm_wake_unlock(const char *buf);

#endif /* CONFIG_PM_WAKELOCKS */
341
/*
 * Pause cpuidle and then take the secondary CPUs offline; returns the
 * result of the hotplug operation.
 */
static inline int pm_sleep_disable_secondary_cpus(void)
{
	cpuidle_pause();
	return suspend_disable_secondary_cpus();
}
347
/*
 * Reverse of pm_sleep_disable_secondary_cpus(): bring the secondary
 * CPUs back online, then resume cpuidle.
 */
static inline void pm_sleep_enable_secondary_cpus(void)
{
	suspend_enable_secondary_cpus();
	cpuidle_resume();
}
353