// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#define pr_fmt(fmt) "PM: hibernation: " fmt

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */
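
/*
 * Illustrative usage sketch (a comment only, not part of the build):
 * whether enable_restore_image_protection() is wired up to a kernel
 * command line option is an assumption here; only the call pattern of
 * the helpers above is shown.
 *
 *	enable_restore_image_protection();	// opt in before resume
 *	hibernate_restore_protection_begin();	// arm while loading the image
 *	hibernate_restore_protect_page(addr);	// make a restored page RO
 *	...
 *	hibernate_restore_protection_end();	// disarm; swsusp_free() does this
 */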

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
}
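
/*
 * Worked example (assuming 4 KiB pages): on a machine with 8 GiB of RAM,
 * totalram_pages() is about 2097152, so the default image_size is
 * (2097152 * 2 / 5) * 4096 bytes, roughly 3.2 GiB, i.e. 2/5 of RAM.
 */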

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;
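
/*
 * Example layout (assuming 4 KiB pages and 64-bit pointers): each
 * struct linked_page occupies exactly one page; the first 8 bytes hold
 * the ->next pointer and the remaining 4088 bytes are ->data, so a
 * chain of N pages stores N * 4088 bytes of payload.
 */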

/*
 * List of "safe" pages (ie. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
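
/*
 * Usage sketch (this mirrors how memory_bm_create() below drives the
 * chain allocator; the object type is just an example):
 *
 *	struct chain_allocator ca;
 *	struct rtree_node *node;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	node = chain_alloc(&ca, sizeof(struct rtree_node));
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);
 *
 * chain_init() sets used_space to LINKED_PAGE_DATA_SIZE on purpose:
 * the first chain_alloc() then sees a "full" page and allocates the
 * first page of the chain lazily.
 */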

/*
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects.  The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone.  For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent each block of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits.  Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the PFNs that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits.  There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree.  There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes.  The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
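
/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 * BM_BITS_PER_BLOCK = 4096 * 8 = 32768, BM_BLOCK_SHIFT = 15 and
 * BM_BLOCK_MASK = 0x7fff.  One bitmap page thus tracks 32768 page
 * frames (128 MiB of memory); for a pfn offset within a zone,
 * "offset >> BM_BLOCK_SHIFT" selects the block and
 * "offset & BM_BLOCK_MASK" the bit inside it.
 */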

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing.
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
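
/*
 * Worked example (64-bit, 4 KiB pages): BM_ENTRIES_PER_LEVEL = 4096 / 8
 * = 512 slots per inner node and BM_RTREE_LEVEL_SHIFT = 9, so a
 * one-level tree addresses 512 blocks (64 GiB with the numbers above)
 * and every additional level multiplies that by 512.
 */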

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree.  It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order.  This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}
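
/*
 * Worked example (4 KiB pages): a zone spanning 1 GiB covers 262144
 * page frames, so nr_blocks = DIV_ROUND_UP(262144, 32768) = 8 leaf
 * blocks, which fit under a single-level radix tree (a level indexes
 * up to 512 blocks).
 */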

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree.  The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}
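
/*
 * Example: with populated zones spanning PFNs [0, 4096), [1024, 8192)
 * and [16384, 20480), create_mem_extents() produces two extents,
 * [0, 8192) (the first two ranges overlap and are merged) and
 * [16384, 20480).
 */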

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone.  Now walk the radix tree to find the leaf
	 * node for our PFN.
	 */

	/*
	 * If the zone we wish to scan is the current zone and the
	 * pfn falls into the current node then we do not need to walk
	 * the tree.
	 */
	node = bm->cur.node;
	if (zone == bm->cur.zone &&
	    ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node      = zone->rtree;
	block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}
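
/*
 * Worked example (4 KiB pages, zone->start_pfn == 0): for pfn 100000,
 * block_nr = 100000 >> 15 = 3 and *bit_nr = 100000 & 0x7fff = 1696,
 * i.e. bit 1696 of the fourth leaf block; with a one-level tree the
 * walk reads node = root->data[3].
 */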

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap.  This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit  = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
				  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it.  If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages	  = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit	  = find_next_bit(bm->cur.node->data, bits,
					  bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}
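
/*
 * Typical iteration over all set bits (this is the pattern used by
 * swsusp_free() and clear_free_pages() below):
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		...use pfn...
 */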

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}

/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
					struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	/* This allocation cannot fail */
	region = memblock_alloc(sizeof(struct nosave_region),
				SMP_CACHE_BYTES);
	if (!region)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(struct nosave_region));
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("Basic memory bitmaps freed\n");
}

void clear_free_pages(void)
{
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) {
		memory_bm_position_reset(bm);
		pfn = memory_bm_next_pfn(bm);
		while (pfn != BM_END_OF_MAP) {
			if (pfn_valid(pfn))
				clear_highpage(pfn_to_page(pfn));

			pfn = memory_bm_next_pfn(bm);
		}
		memory_bm_position_reset(bm);
		pr_info("free pages cleared after restore\n");
	}
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the
 * hibernation image data structures for @zone (usually, the returned value is
 * greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
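
/*
 * Worked example (64-bit, 4 KiB pages, sizeof(struct rtree_node) == 24):
 * for a zone spanning 1 GiB, nodes = 8 leaf blocks, plus
 * DIV_ROUND_UP(8 * 24, 4088) = 1 page of rtree_node bookkeeping and
 * 1 inner level (DIV_ROUND_UP(8, 512) = 1), giving rtree = 10; the
 * result is doubled to 20, presumably to cover the two bitmaps
 * (orig_bm and copy_bm) that are created for the image.
 */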

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page) || PageOffline(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageOffline(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables.  This is always the case if neither CONFIG_DEBUG_PAGEALLOC
 * nor CONFIG_ARCH_HAS_SET_DIRECT_MAP is set, in which case
 * kernel_page_present() always returns 'true'.
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
				pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages.  During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps.  This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	return div64_u64(x * multiplier, base);
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE);

	return saveable <= size ? 0 : saveable - size;
}

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use.  We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
 * /sys/power/reserved_size), respectively).  To make this happen, we compute
 * the total number of available page frames and allocate at least
 *
 * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
 *  - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	pr_info("Preallocating image memory\n");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error) {
		pr_err("Cannot allocate original bitmap\n");
		goto err_out;
	}

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error) {
		pr_err("Cannot allocate copy bitmap\n");
		goto err_out;
	}

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1742 	/* Compute the desired number of image pages specified by image_size. */
1743 	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1744 	if (size > max_size)
1745 		size = max_size;
1746 	/*
1747 	 * If the desired number of image pages is at least as large as the
1748 	 * current number of saveable pages in memory, allocate page frames for
1749 	 * the image and we're done.
1750 	 */
1751 	if (size >= saveable) {
1752 		pages = preallocate_image_highmem(save_highmem);
1753 		pages += preallocate_image_memory(saveable - pages, avail_normal);
1754 		goto out;
1755 	}
1756 
1757 	/* Estimate the minimum size of the image. */
1758 	pages = minimum_image_size(saveable);
1759 	/*
1760 	 * To avoid excessive pressure on the normal zone, leave room in it to
1761 	 * accommodate an image of the minimum size (unless it's already too
1762 	 * small, in which case don't preallocate pages from it at all).
1763 	 */
1764 	if (avail_normal > pages)
1765 		avail_normal -= pages;
1766 	else
1767 		avail_normal = 0;
1768 	if (size < pages)
1769 		size = min_t(unsigned long, pages, max_size);
1770 
1771 	/*
1772 	 * Let the memory management subsystem know that we're going to
1773 	 * allocate a large number of page frames, and make it free some memory.
1774 	 * NOTE: If this is not done, performance will be hurt badly in some
1775 	 * test cases.
1776 	 */
1777 	shrink_all_memory(saveable - size);
1778 
1779 	/*
1780 	 * The number of saveable pages in memory was too high, so apply some
1781 	 * pressure to decrease it.  First, make room for the largest possible
1782 	 * image and fail if that doesn't work.  Next, try to decrease the size
1783 	 * of the image as much as indicated by 'size' using allocations from
1784 	 * highmem and non-highmem zones separately.
1785 	 */
1786 	pages_highmem = preallocate_image_highmem(highmem / 2);
1787 	alloc = count - max_size;
1788 	if (alloc > pages_highmem)
1789 		alloc -= pages_highmem;
1790 	else
1791 		alloc = 0;
1792 	pages = preallocate_image_memory(alloc, avail_normal);
1793 	if (pages < alloc) {
1794 		/* We have exhausted non-highmem pages, try highmem. */
1795 		alloc -= pages;
1796 		pages += pages_highmem;
1797 		pages_highmem = preallocate_image_highmem(alloc);
1798 		if (pages_highmem < alloc) {
1799 			pr_err("Image allocation is %lu pages short\n",
1800 				alloc - pages_highmem);
1801 			goto err_out;
1802 		}
1803 		pages += pages_highmem;
1804 		/*
1805 		 * size is the desired number of saveable pages to leave in
1806 		 * memory, so try to preallocate (all memory - size) pages.
1807 		 */
1808 		alloc = (count - pages) - size;
1809 		pages += preallocate_image_highmem(alloc);
1810 	} else {
1811 		/*
1812 		 * There are approximately max_size saveable pages at this point
1813 		 * and we want to reduce this number down to size.
1814 		 */
1815 		alloc = max_size - size;
1816 		size = preallocate_highmem_fraction(alloc, highmem, count);
1817 		pages_highmem += size;
1818 		alloc -= size;
1819 		size = preallocate_image_memory(alloc, avail_normal);
1820 		pages_highmem += preallocate_image_highmem(alloc - size);
1821 		pages += pages_highmem + size;
1822 	}
1823 
1824 	/*
1825 	 * We only need as many page frames for the image as there are saveable
1826 	 * pages in memory, but we have allocated more.  Release the excess
1827 	 * pages now.
1828 	 */
1829 	pages -= free_unnecessary_pages();
1830 
1831  out:
1832 	stop = ktime_get();
1833 	pr_info("Allocated %lu pages for snapshot\n", pages);
1834 	swsusp_show_speed(start, stop, pages, "Allocated");
1835 
1836 	return 0;
1837 
1838  err_out:
1839 	swsusp_free();
1840 	return -ENOMEM;
1841 }
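/*
 * Worked example of the max_size formula from the comment above
 * (hypothetical numbers, taking PAGES_FOR_IO to be 1024 for the sake of
 * the example): with count = 1000000 usable page frames, size = 2000
 * metadata pages and reserved_size amounting to 256 pages, the code
 * computes
 *
 *	max_size = (1000000 - (2000 + 1024)) / 2 - 2 * 256
 *		 = 498488 - 512 = 497976 pages,
 *
 * i.e. roughly half of the usable page frames may be left in memory as
 * saveable pages, with the other half holding their copies.
 */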
1842 
1843 #ifdef CONFIG_HIGHMEM
1844 /**
1845  * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1846  *
1847  * Compute the number of non-highmem pages that will be necessary for creating
1848  * copies of highmem pages.
1849  */
1850 static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1851 {
1852 	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1853 
1854 	if (free_highmem >= nr_highmem)
1855 		nr_highmem = 0;
1856 	else
1857 		nr_highmem -= free_highmem;
1858 
1859 	return nr_highmem;
1860 }
1861 #else
1862 static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1863 #endif /* CONFIG_HIGHMEM */
1864 
1865 /**
1866  * enough_free_mem - Check if there is enough free memory for the image.
1867  */
1868 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1869 {
1870 	struct zone *zone;
1871 	unsigned int free = alloc_normal;
1872 
1873 	for_each_populated_zone(zone)
1874 		if (!is_highmem(zone))
1875 			free += zone_page_state(zone, NR_FREE_PAGES);
1876 
1877 	nr_pages += count_pages_for_highmem(nr_highmem);
1878 	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
1879 		 nr_pages, PAGES_FOR_IO, free);
1880 
1881 	return free > nr_pages + PAGES_FOR_IO;
1882 }
1883 
1884 #ifdef CONFIG_HIGHMEM
1885 /**
1886  * get_highmem_buffer - Allocate a buffer for highmem pages.
1887  *
1888  * If there are some highmem pages in the hibernation image, we may need a
1889  * buffer to copy them and/or load their data.
1890  */
1891 static inline int get_highmem_buffer(int safe_needed)
1892 {
1893 	buffer = get_image_page(GFP_ATOMIC, safe_needed);
1894 	return buffer ? 0 : -ENOMEM;
1895 }
1896 
1897 /**
1898  * alloc_highmem_image_pages - Allocate some highmem pages for the image.
1899  *
1900  * Try to allocate as many pages as needed, but if the number of free highmem
1901  * pages is less than that, allocate them all.
1902  */
1903 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1904 					       unsigned int nr_highmem)
1905 {
1906 	unsigned int to_alloc = count_free_highmem_pages();
1907 
1908 	if (to_alloc > nr_highmem)
1909 		to_alloc = nr_highmem;
1910 
1911 	nr_highmem -= to_alloc;
1912 	while (to_alloc-- > 0) {
1913 		struct page *page;
1914 
1915 		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
1916 		memory_bm_set_bit(bm, page_to_pfn(page));
1917 	}
1918 	return nr_highmem;
1919 }
1920 #else
1921 static inline int get_highmem_buffer(int safe_needed) { return 0; }
1922 
1923 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1924 					       unsigned int n) { return 0; }
1925 #endif /* CONFIG_HIGHMEM */
1926 
1927 /**
1928  * swsusp_alloc - Allocate memory for hibernation image.
1929  *
1930  * We first try to allocate as many highmem pages as there are
1931  * saveable highmem pages in the system.  If that fails, we allocate
1932  * non-highmem pages for the copies of the remaining highmem ones.
1933  *
1934  * In this approach it is likely that the copies of highmem pages will
1935  * also be located in high memory, because of the way in which
1936  * copy_data_pages() works.
1937  */
1938 static int swsusp_alloc(struct memory_bitmap *copy_bm,
1939 			unsigned int nr_pages, unsigned int nr_highmem)
1940 {
1941 	if (nr_highmem > 0) {
1942 		if (get_highmem_buffer(PG_ANY))
1943 			goto err_out;
1944 		if (nr_highmem > alloc_highmem) {
1945 			nr_highmem -= alloc_highmem;
1946 			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1947 		}
1948 	}
1949 	if (nr_pages > alloc_normal) {
1950 		nr_pages -= alloc_normal;
1951 		while (nr_pages-- > 0) {
1952 			struct page *page;
1953 
1954 			page = alloc_image_page(GFP_ATOMIC);
1955 			if (!page)
1956 				goto err_out;
1957 			memory_bm_set_bit(copy_bm, page_to_pfn(page));
1958 		}
1959 	}
1960 
1961 	return 0;
1962 
1963  err_out:
1964 	swsusp_free();
1965 	return -ENOMEM;
1966 }
1967 
1968 asmlinkage __visible int swsusp_save(void)
1969 {
1970 	unsigned int nr_pages, nr_highmem;
1971 
1972 	pr_info("Creating image:\n");
1973 
1974 	drain_local_pages(NULL);
1975 	nr_pages = count_data_pages();
1976 	nr_highmem = count_highmem_pages();
1977 	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
1978 
1979 	if (!enough_free_mem(nr_pages, nr_highmem)) {
1980 		pr_err("Not enough free memory\n");
1981 		return -ENOMEM;
1982 	}
1983 
1984 	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
1985 		pr_err("Memory allocation failed\n");
1986 		return -ENOMEM;
1987 	}
1988 
1989 	/*
1990 	 * New cold pages may have appeared on the per-CPU lists while the
1991 	 * suspend pagedir was being allocated.  Drain them.
1992 	 */
1993 	drain_local_pages(NULL);
1994 	copy_data_pages(&copy_bm, &orig_bm);
1995 
1996 	/*
1997 	 * End of critical section.  From now on, we can write to memory, but
1998 	 * we should not touch disk.  In particular, this means we must _not_
1999 	 * touch swap space, except of course to write out the image itself.
2000 	 */
2001 
2002 	nr_pages += nr_highmem;
2003 	nr_copy_pages = nr_pages;
2004 	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
2005 
2006 	pr_info("Image created (%d pages copied)\n", nr_pages);
2007 
2008 	return 0;
2009 }
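/*
 * Example of the metadata arithmetic above (hypothetical numbers): on a
 * 64-bit machine with 4 KB pages, one page holds
 * PAGE_SIZE / sizeof(long) = 512 PFNs, so an image with
 * nr_pages = 100000 copied pages needs
 * DIV_ROUND_UP(100000 * 8, 4096) = 196 metadata pages.
 */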
2010 
2011 #ifndef CONFIG_ARCH_HIBERNATION_HEADER
2012 static int init_header_complete(struct swsusp_info *info)
2013 {
2014 	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2015 	info->version_code = LINUX_VERSION_CODE;
2016 	return 0;
2017 }
2018 
2019 static const char *check_image_kernel(struct swsusp_info *info)
2020 {
2021 	if (info->version_code != LINUX_VERSION_CODE)
2022 		return "kernel version";
2023 	if (strcmp(info->uts.sysname, init_utsname()->sysname))
2024 		return "system type";
2025 	if (strcmp(info->uts.release, init_utsname()->release))
2026 		return "kernel release";
2027 	if (strcmp(info->uts.version, init_utsname()->version))
2028 		return "version";
2029 	if (strcmp(info->uts.machine, init_utsname()->machine))
2030 		return "machine";
2030 		return "machine";
2031 	return NULL;
2032 }
2033 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2034 
2035 unsigned long snapshot_get_image_size(void)
2036 {
2037 	return nr_copy_pages + nr_meta_pages + 1;
2038 }
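/*
 * Continuing the example above (hypothetical numbers): 100000 copied
 * pages plus 196 metadata pages plus 1 header page gives an image of
 * 100197 pages, i.e. about 391 MB with 4 KB pages.
 */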
2039 
2040 static int init_header(struct swsusp_info *info)
2041 {
2042 	memset(info, 0, sizeof(struct swsusp_info));
2043 	info->num_physpages = get_num_physpages();
2044 	info->image_pages = nr_copy_pages;
2045 	info->pages = snapshot_get_image_size();
2046 	info->size = info->pages;
2047 	info->size <<= PAGE_SHIFT;
2048 	return init_header_complete(info);
2049 }
2050 
2051 /**
2052  * pack_pfns - Prepare PFNs for saving.
2053  * @buf: Memory buffer to store the PFNs in.
2054  * @bm: Memory bitmap.
2055  *
2056  * PFNs corresponding to set bits in @bm are stored in the area of memory
2057  * pointed to by @buf (1 page at a time).
2058  */
2059 static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
2060 {
2061 	int j;
2062 
2063 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2064 		buf[j] = memory_bm_next_pfn(bm);
2065 		if (unlikely(buf[j] == BM_END_OF_MAP))
2066 			break;
2067 	}
2068 }
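/*
 * Layout sketch: each metadata page written out by pack_pfns() is an
 * array of PAGE_SIZE / sizeof(long) PFN values (512 on a 64-bit machine
 * with 4 KB pages).  The last page of the stream may be only partially
 * filled, in which case a BM_END_OF_MAP marker terminates it; the
 * counterpart unpack_orig_pfns() below stops at the same marker.
 */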
2069 
2070 /**
2071  * snapshot_read_next - Get the address to read the next image page from.
2072  * @handle: Snapshot handle to be used for the reading.
2073  *
2074  * On the first call, @handle should point to a zeroed snapshot_handle
2075  * structure.  The structure is then populated, and a pointer to it should
2076  * be passed to this function on every subsequent call.
2077  *
2078  * On success, the function returns a positive number.  Then, the caller
2079  * is allowed to read up to the returned number of bytes from the memory
2080  * location computed by the data_of() macro.
2081  *
2082  * The function returns 0 to indicate the end of the data stream.  Negative
2083  * numbers are returned on errors, in which case the structure pointed to by
2084  * @handle is not updated and should not be used any more.
2085  */
2086 int snapshot_read_next(struct snapshot_handle *handle)
2087 {
2088 	if (handle->cur > nr_meta_pages + nr_copy_pages)
2089 		return 0;
2090 
2091 	if (!buffer) {
2092 		/* This causes the buffer to be freed by swsusp_free(). */
2093 		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2094 		if (!buffer)
2095 			return -ENOMEM;
2096 	}
2097 	if (!handle->cur) {
2098 		int error;
2099 
2100 		error = init_header((struct swsusp_info *)buffer);
2101 		if (error)
2102 			return error;
2103 		handle->buffer = buffer;
2104 		memory_bm_position_reset(&orig_bm);
2105 		memory_bm_position_reset(&copy_bm);
2106 	} else if (handle->cur <= nr_meta_pages) {
2107 		clear_page(buffer);
2108 		pack_pfns(buffer, &orig_bm);
2109 	} else {
2110 		struct page *page;
2111 
2112 		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2113 		if (PageHighMem(page)) {
2114 			/*
2115 			 * Highmem pages are copied to the buffer,
2116 			 * because we can't return with a kmapped
2117 			 * highmem page (we may not be called again).
2118 			 */
2119 			void *kaddr;
2120 
2121 			kaddr = kmap_atomic(page);
2122 			copy_page(buffer, kaddr);
2123 			kunmap_atomic(kaddr);
2124 			handle->buffer = buffer;
2125 		} else {
2126 			handle->buffer = page_address(page);
2127 		}
2128 	}
2129 	handle->cur++;
2130 	return PAGE_SIZE;
2131 }
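/*
 * Hedged usage sketch (illustrative only; write_page_to_storage() is a
 * hypothetical helper, not a function from this kernel).  A caller such
 * as the swap-writing code typically drives the handle like this:
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	do {
 *		ret = snapshot_read_next(&handle);
 *		if (ret > 0)
 *			write_page_to_storage(data_of(handle), ret);
 *	} while (ret > 0);
 *	// ret == 0 means end of image, ret < 0 means error
 */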
2132 
2133 static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2134 				    struct memory_bitmap *src)
2135 {
2136 	unsigned long pfn;
2137 
2138 	memory_bm_position_reset(src);
2139 	pfn = memory_bm_next_pfn(src);
2140 	while (pfn != BM_END_OF_MAP) {
2141 		memory_bm_set_bit(dst, pfn);
2142 		pfn = memory_bm_next_pfn(src);
2143 	}
2144 }
2145 
2146 /**
2147  * mark_unsafe_pages - Mark pages that were used before hibernation.
2148  *
2149  * Mark the pages that cannot be used for storing the image during restoration,
2150  * because they conflict with the pages that had been used before hibernation.
2151  */
2152 static void mark_unsafe_pages(struct memory_bitmap *bm)
2153 {
2154 	unsigned long pfn;
2155 
2156 	/* Clear the "free"/"unsafe" bit for all PFNs */
2157 	memory_bm_position_reset(free_pages_map);
2158 	pfn = memory_bm_next_pfn(free_pages_map);
2159 	while (pfn != BM_END_OF_MAP) {
2160 		memory_bm_clear_current(free_pages_map);
2161 		pfn = memory_bm_next_pfn(free_pages_map);
2162 	}
2163 
2164 	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
2165 	duplicate_memory_bitmap(free_pages_map, bm);
2166 
2167 	allocated_unsafe_pages = 0;
2168 }
2169 
2170 static int check_header(struct swsusp_info *info)
2171 {
2172 	const char *reason;
2173 
2174 	reason = check_image_kernel(info);
2175 	if (!reason && info->num_physpages != get_num_physpages())
2176 		reason = "memory size";
2177 	if (reason) {
2178 		pr_err("Image mismatch: %s\n", reason);
2179 		return -EPERM;
2180 	}
2181 	return 0;
2182 }
2183 
2184 /**
2185  * load_header - Check the image header and copy the data from it.
2186  */
2187 static int load_header(struct swsusp_info *info)
2188 {
2189 	int error;
2190 
2191 	restore_pblist = NULL;
2192 	error = check_header(info);
2193 	if (!error) {
2194 		nr_copy_pages = info->image_pages;
2195 		nr_meta_pages = info->pages - info->image_pages - 1;
2196 	}
2197 	return error;
2198 }
2199 
2200 /**
2201  * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2202  * @buf: Area of memory containing the PFNs.
2203  * @bm: Memory bitmap.
2204  *
2205  * For each element of the array pointed to by @buf (1 page at a time), set the
2206  * corresponding bit in @bm.
2207  */
2208 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2209 {
2210 	int j;
2211 
2212 	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2213 		if (unlikely(buf[j] == BM_END_OF_MAP))
2214 			break;
2215 
2216 		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
2217 			memory_bm_set_bit(bm, buf[j]);
2218 		else
2219 			return -EFAULT;
2220 	}
2221 
2222 	return 0;
2223 }
2224 
2225 #ifdef CONFIG_HIGHMEM
2226 /*
2227  * struct highmem_pbe is used for creating the list of highmem pages that
2228  * should be restored atomically during the resume from disk, because the page
2229  * frames they occupied before the suspend are in use.
2230  */
2231 struct highmem_pbe {
2232 	struct page *copy_page;	/* data is here now */
2233 	struct page *orig_page;	/* data was here before the suspend */
2234 	struct highmem_pbe *next;
2235 };
2236 
2237 /*
2238  * List of highmem PBEs needed for restoring the highmem pages that were
2239  * allocated before the suspend and included in the suspend image, but have
2240  * also been allocated by the "resume" kernel, so their contents cannot be
2241  * written directly to their "original" page frames.
2242  */
2243 static struct highmem_pbe *highmem_pblist;
2244 
2245 /**
2246  * count_highmem_image_pages - Compute the number of highmem pages in the image.
2247  * @bm: Memory bitmap.
2248  *
2249  * The bits in @bm that correspond to image pages are assumed to be set.
2250  */
2251 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2252 {
2253 	unsigned long pfn;
2254 	unsigned int cnt = 0;
2255 
2256 	memory_bm_position_reset(bm);
2257 	pfn = memory_bm_next_pfn(bm);
2258 	while (pfn != BM_END_OF_MAP) {
2259 		if (PageHighMem(pfn_to_page(pfn)))
2260 			cnt++;
2261 
2262 		pfn = memory_bm_next_pfn(bm);
2263 	}
2264 	return cnt;
2265 }
2266 
2267 static unsigned int safe_highmem_pages;
2268 
2269 static struct memory_bitmap *safe_highmem_bm;
2270 
2271 /**
2272  * prepare_highmem_image - Allocate memory for loading highmem data from image.
2273  * @bm: Pointer to an uninitialized memory bitmap structure.
2274  * @nr_highmem_p: Pointer to the number of highmem image pages.
2275  *
2276  * Try to allocate as many highmem pages as there are highmem image pages
2277  * (@nr_highmem_p points to the variable containing the number of highmem image
2278  * pages).  The pages that are "safe" (i.e. will not be overwritten when the
2279  * hibernation image is restored entirely) have the corresponding bits set in
2280  * @bm (it must be uninitialized).
2281  *
2282  * NOTE: This function should not be called if there are no highmem image pages.
2283  */
2284 static int prepare_highmem_image(struct memory_bitmap *bm,
2285 				 unsigned int *nr_highmem_p)
2286 {
2287 	unsigned int to_alloc;
2288 
2289 	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2290 		return -ENOMEM;
2291 
2292 	if (get_highmem_buffer(PG_SAFE))
2293 		return -ENOMEM;
2294 
2295 	to_alloc = count_free_highmem_pages();
2296 	if (to_alloc > *nr_highmem_p)
2297 		to_alloc = *nr_highmem_p;
2298 	else
2299 		*nr_highmem_p = to_alloc;
2300 
2301 	safe_highmem_pages = 0;
2302 	while (to_alloc-- > 0) {
2303 		struct page *page;
2304 
2305 		page = alloc_page(__GFP_HIGHMEM);
2306 		if (!swsusp_page_is_free(page)) {
2307 			/* The page is "safe"; set its bit in the bitmap */
2308 			memory_bm_set_bit(bm, page_to_pfn(page));
2309 			safe_highmem_pages++;
2310 		}
2311 		/* Mark the page as allocated */
2312 		swsusp_set_page_forbidden(page);
2313 		swsusp_set_page_free(page);
2314 	}
2315 	memory_bm_position_reset(bm);
2316 	safe_highmem_bm = bm;
2317 	return 0;
2318 }
2319 
2320 static struct page *last_highmem_page;
2321 
2322 /**
2323  * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2324  *
2325  * For a given highmem image page get a buffer that snapshot_write_next()
2326  * should return to its caller to write to.
2327  *
2328  * If the page is to be saved to its "original" page frame or a copy of
2329  * the page is to be made in highmem, @buffer is returned.  Otherwise,
2330  * the copy of the page is to be made in normal memory, so the address of
2331  * the copy is returned.
2332  *
2333  * If @buffer is returned, the caller of snapshot_write_next() will write
2334  * the page's contents to @buffer, so they will have to be copied to the
2335  * right location on the next call to snapshot_write_next(), which is done
2336  * with the help of copy_last_highmem_page().  For this purpose, if
2337  * @buffer is returned, @last_highmem_page is set to the page to which
2338  * the data will have to be copied from @buffer.
2339  */
2340 static void *get_highmem_page_buffer(struct page *page,
2341 				     struct chain_allocator *ca)
2342 {
2343 	struct highmem_pbe *pbe;
2344 	void *kaddr;
2345 
2346 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2347 		/*
2348 		 * We have allocated the "original" page frame and we can
2349 		 * use it directly to store the loaded page.
2350 		 */
2351 		last_highmem_page = page;
2352 		return buffer;
2353 	}
2354 	/*
2355 	 * The "original" page frame has not been allocated and we have to
2356 	 * use a "safe" page frame to store the loaded page.
2357 	 */
2358 	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2359 	if (!pbe) {
2360 		swsusp_free();
2361 		return ERR_PTR(-ENOMEM);
2362 	}
2363 	pbe->orig_page = page;
2364 	if (safe_highmem_pages > 0) {
2365 		struct page *tmp;
2366 
2367 		/* Copy of the page will be stored in high memory */
2368 		kaddr = buffer;
2369 		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2370 		safe_highmem_pages--;
2371 		last_highmem_page = tmp;
2372 		pbe->copy_page = tmp;
2373 	} else {
2374 		/* Copy of the page will be stored in normal memory */
2375 		kaddr = safe_pages_list;
2376 		safe_pages_list = safe_pages_list->next;
2377 		pbe->copy_page = virt_to_page(kaddr);
2378 	}
2379 	pbe->next = highmem_pblist;
2380 	highmem_pblist = pbe;
2381 	return kaddr;
2382 }
2383 
2384 /**
2385  * copy_last_highmem_page - Copy the most recent highmem image page.
2386  *
2387  * Copy the contents of a highmem image page from @buffer, where the caller of
2388  * snapshot_write_next() has stored them, to the right location represented by
2389  * @last_highmem_page.
2390  */
2391 static void copy_last_highmem_page(void)
2392 {
2393 	if (last_highmem_page) {
2394 		void *dst;
2395 
2396 		dst = kmap_atomic(last_highmem_page);
2397 		copy_page(dst, buffer);
2398 		kunmap_atomic(dst);
2399 		last_highmem_page = NULL;
2400 	}
2401 }
2402 
2403 static inline int last_highmem_page_copied(void)
2404 {
2405 	return !last_highmem_page;
2406 }
2407 
2408 static inline void free_highmem_data(void)
2409 {
2410 	if (safe_highmem_bm)
2411 		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2412 
2413 	if (buffer)
2414 		free_image_page(buffer, PG_UNSAFE_CLEAR);
2415 }
2416 #else
2417 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2418 
2419 static inline int prepare_highmem_image(struct memory_bitmap *bm,
2420 					unsigned int *nr_highmem_p) { return 0; }
2421 
2422 static inline void *get_highmem_page_buffer(struct page *page,
2423 					    struct chain_allocator *ca)
2424 {
2425 	return ERR_PTR(-EINVAL);
2426 }
2427 
2428 static inline void copy_last_highmem_page(void) {}
2429 static inline int last_highmem_page_copied(void) { return 1; }
2430 static inline void free_highmem_data(void) {}
2431 #endif /* CONFIG_HIGHMEM */
2432 
2433 #define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
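/*
 * Example (assuming a 64-bit machine with 4 KB pages, so that
 * LINKED_PAGE_DATA_SIZE is PAGE_SIZE minus one pointer, 4088 bytes, and
 * struct pbe is three pointers, 24 bytes): PBES_PER_LINKED_PAGE then
 * evaluates to 4088 / 24 = 170 PBEs per linked page.
 */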
2434 
2435 /**
2436  * prepare_image - Make room for loading hibernation image.
2437  * @new_bm: Uninitialized memory bitmap structure.
2438  * @bm: Memory bitmap with unsafe pages marked.
2439  *
2440  * Use @bm to mark the pages that will be overwritten in the process of
2441  * restoring the system memory state from the suspend image ("unsafe" pages)
2442  * and allocate memory for the image.
2443  *
2444  * The idea is to allocate a new memory bitmap first and then allocate
2445  * as many pages as needed for image data, but without specifying what those
2446  * pages will be used for just yet.  Instead, we mark them all as allocated and
2447  * create a list of "safe" pages to be used later.  On systems with high
2448  * memory a list of "safe" highmem pages is created too.
2449  */
2450 static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2451 {
2452 	unsigned int nr_pages, nr_highmem;
2453 	struct linked_page *lp;
2454 	int error;
2455 
2456 	/* If there is no highmem, the buffer will not be necessary */
2457 	free_image_page(buffer, PG_UNSAFE_CLEAR);
2458 	buffer = NULL;
2459 
2460 	nr_highmem = count_highmem_image_pages(bm);
2461 	mark_unsafe_pages(bm);
2462 
2463 	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2464 	if (error)
2465 		goto Free;
2466 
2467 	duplicate_memory_bitmap(new_bm, bm);
2468 	memory_bm_free(bm, PG_UNSAFE_KEEP);
2469 	if (nr_highmem > 0) {
2470 		error = prepare_highmem_image(bm, &nr_highmem);
2471 		if (error)
2472 			goto Free;
2473 	}
2474 	/*
2475 	 * Reserve some safe pages for potential later use.
2476 	 *
2477 	 * NOTE: This way we make sure there will be enough safe pages for the
2478 	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
2479 	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2480 	 *
2481 	 * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
2482 	 */
2483 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2484 	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2485 	while (nr_pages > 0) {
2486 		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2487 		if (!lp) {
2488 			error = -ENOMEM;
2489 			goto Free;
2490 		}
2491 		lp->next = safe_pages_list;
2492 		safe_pages_list = lp;
2493 		nr_pages--;
2494 	}
2495 	/* Preallocate memory for the image */
2496 	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2497 	while (nr_pages > 0) {
2498 		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2499 		if (!lp) {
2500 			error = -ENOMEM;
2501 			goto Free;
2502 		}
2503 		if (!swsusp_page_is_free(virt_to_page(lp))) {
2504 			/* The page is "safe", add it to the list */
2505 			lp->next = safe_pages_list;
2506 			safe_pages_list = lp;
2507 		}
2508 		/* Mark the page as allocated */
2509 		swsusp_set_page_forbidden(virt_to_page(lp));
2510 		swsusp_set_page_free(virt_to_page(lp));
2511 		nr_pages--;
2512 	}
2513 	return 0;
2514 
2515  Free:
2516 	swsusp_free();
2517 	return error;
2518 }
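/*
 * Example of the reservation arithmetic above (hypothetical numbers,
 * using the 170 PBEs-per-page figure from the example following
 * PBES_PER_LINKED_PAGE): with nr_copy_pages = 100000, no highmem pages
 * and 20000 allocated unsafe pages, nr_pages = 80000, so
 * DIV_ROUND_UP(80000, 170) = 471 linked pages are reserved for PBEs
 * before the 80000 image data pages themselves are allocated.
 */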
2519 
2520 /**
2521  * get_buffer - Get the address to store the next image data page.
2522  *
2523  * Get the address that snapshot_write_next() should return to its caller to
2524  * write to.
2525  */
2526 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2527 {
2528 	struct pbe *pbe;
2529 	struct page *page;
2530 	unsigned long pfn = memory_bm_next_pfn(bm);
2531 
2532 	if (pfn == BM_END_OF_MAP)
2533 		return ERR_PTR(-EFAULT);
2534 
2535 	page = pfn_to_page(pfn);
2536 	if (PageHighMem(page))
2537 		return get_highmem_page_buffer(page, ca);
2538 
2539 	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2540 		/*
2541 		 * We have allocated the "original" page frame and we can
2542 		 * use it directly to store the loaded page.
2543 		 */
2544 		return page_address(page);
2545 
2546 	/*
2547 	 * The "original" page frame has not been allocated and we have to
2548 	 * use a "safe" page frame to store the loaded page.
2549 	 */
2550 	pbe = chain_alloc(ca, sizeof(struct pbe));
2551 	if (!pbe) {
2552 		swsusp_free();
2553 		return ERR_PTR(-ENOMEM);
2554 	}
2555 	pbe->orig_address = page_address(page);
2556 	pbe->address = safe_pages_list;
2557 	safe_pages_list = safe_pages_list->next;
2558 	pbe->next = restore_pblist;
2559 	restore_pblist = pbe;
2560 	return pbe->address;
2561 }
2562 
2563 /**
2564  * snapshot_write_next - Get the address to store the next image page.
2565  * @handle: Snapshot handle structure to guide the writing.
2566  *
2567  * On the first call, @handle should point to a zeroed snapshot_handle
2568  * structure.  The structure is then populated, and a pointer to it should
2569  * be passed to this function on every subsequent call.
2570  *
2571  * On success, the function returns a positive number.  Then, the caller
2572  * is allowed to write up to the returned number of bytes to the memory
2573  * location computed by the data_of() macro.
2574  *
2575  * The function returns 0 to indicate the "end of file" condition.  Negative
2576  * numbers are returned on errors, in which case the structure pointed to by
2577  * @handle is not updated and should not be used any more.
2578  */
2579 int snapshot_write_next(struct snapshot_handle *handle)
2580 {
2581 	static struct chain_allocator ca;
2582 	int error = 0;
2583 
2584 	/* Check if we have already loaded the entire image */
2585 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2586 		return 0;
2587 
2588 	handle->sync_read = 1;
2589 
2590 	if (!handle->cur) {
2591 		if (!buffer)
2592 			/* This causes the buffer to be freed by swsusp_free(). */
2593 			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2594 
2595 		if (!buffer)
2596 			return -ENOMEM;
2597 
2598 		handle->buffer = buffer;
2599 	} else if (handle->cur == 1) {
2600 		error = load_header(buffer);
2601 		if (error)
2602 			return error;
2603 
2604 		safe_pages_list = NULL;
2605 
2606 		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2607 		if (error)
2608 			return error;
2609 
2610 		hibernate_restore_protection_begin();
2611 	} else if (handle->cur <= nr_meta_pages + 1) {
2612 		error = unpack_orig_pfns(buffer, &copy_bm);
2613 		if (error)
2614 			return error;
2615 
2616 		if (handle->cur == nr_meta_pages + 1) {
2617 			error = prepare_image(&orig_bm, &copy_bm);
2618 			if (error)
2619 				return error;
2620 
2621 			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2622 			memory_bm_position_reset(&orig_bm);
2623 			restore_pblist = NULL;
2624 			handle->buffer = get_buffer(&orig_bm, &ca);
2625 			handle->sync_read = 0;
2626 			if (IS_ERR(handle->buffer))
2627 				return PTR_ERR(handle->buffer);
2628 		}
2629 	} else {
2630 		copy_last_highmem_page();
2631 		hibernate_restore_protect_page(handle->buffer);
2632 		handle->buffer = get_buffer(&orig_bm, &ca);
2633 		if (IS_ERR(handle->buffer))
2634 			return PTR_ERR(handle->buffer);
2635 		if (handle->buffer != buffer)
2636 			handle->sync_read = 0;
2637 	}
2638 	handle->cur++;
2639 	return PAGE_SIZE;
2640 }
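/*
 * Hedged usage sketch (illustrative only; read_page_from_storage() is a
 * hypothetical helper).  An image loader mirrors the read side and then
 * finalizes and verifies the load:
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	do {
 *		ret = snapshot_write_next(&handle);
 *		if (ret > 0)
 *			read_page_from_storage(data_of(handle), ret);
 *	} while (ret > 0);
 *	snapshot_write_finalize(&handle);
 *	if (ret >= 0 && !snapshot_image_loaded(&handle))
 *		ret = -ENODATA;
 */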
2641 
2642 /**
2643  * snapshot_write_finalize - Complete the loading of a hibernation image.
2644  *
2645  * Must be called after the last call to snapshot_write_next() in case the last
2646  * page in the image happens to be a highmem page and its contents should be
2647  * stored in highmem.  Additionally, it recycles bitmap memory that's not
2648  * necessary any more.
2649  */
2650 void snapshot_write_finalize(struct snapshot_handle *handle)
2651 {
2652 	copy_last_highmem_page();
2653 	hibernate_restore_protect_page(handle->buffer);
2654 	/* Do this only if the image has been loaded entirely. */
2655 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2656 		memory_bm_recycle(&orig_bm);
2657 		free_highmem_data();
2658 	}
2659 }
2660 
2661 int snapshot_image_loaded(struct snapshot_handle *handle)
2662 {
2663 	return !(!nr_copy_pages || !last_highmem_page_copied() ||
2664 			handle->cur <= nr_meta_pages + nr_copy_pages);
2665 }
2666 
2667 #ifdef CONFIG_HIGHMEM
2668 /* Assumes that @buf is ready and points to a "safe" page */
2669 static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2670 				       void *buf)
2671 {
2672 	void *kaddr1, *kaddr2;
2673 
2674 	kaddr1 = kmap_atomic(p1);
2675 	kaddr2 = kmap_atomic(p2);
2676 	copy_page(buf, kaddr1);
2677 	copy_page(kaddr1, kaddr2);
2678 	copy_page(kaddr2, buf);
2679 	kunmap_atomic(kaddr2);
2680 	kunmap_atomic(kaddr1);
2681 }
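/*
 * In other words, this is the classic three-copy exchange through a
 * scratch page: after copy_page(buf, kaddr1), copy_page(kaddr1, kaddr2)
 * and copy_page(kaddr2, buf), p1 holds p2's old contents and vice versa,
 * while @buf is left holding p1's old contents.
 */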
2682 
2683 /**
2684  * restore_highmem - Put highmem image pages into their original locations.
2685  *
2686  * For each highmem page that was in use before hibernation and is included in
2687  * the image, and also has been allocated by the "restore" kernel, swap its
2688  * current contents with the previous (i.e. "before hibernation") ones.
2689  *
2690  * If the restore eventually fails, we can call this function once again and
2691  * restore the highmem state as seen by the restore kernel.
2692  */
2693 int restore_highmem(void)
2694 {
2695 	struct highmem_pbe *pbe = highmem_pblist;
2696 	void *buf;
2697 
2698 	if (!pbe)
2699 		return 0;
2700 
2701 	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2702 	if (!buf)
2703 		return -ENOMEM;
2704 
2705 	while (pbe) {
2706 		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2707 		pbe = pbe->next;
2708 	}
2709 	free_image_page(buf, PG_UNSAFE_CLEAR);
2710 	return 0;
2711 }
2712 #endif /* CONFIG_HIGHMEM */
2713