// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#define pr_fmt(fmt) "PM: hibernation: " fmt

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */


/*
 * The calls to set_direct_map_*() should not fail because remapping a page
 * here means that we only update protection bits in an existing PTE.
 * It is still worth having a warning here in case something changes and this
 * no longer holds.
 */
static inline void hibernate_map_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
		int ret = set_direct_map_default_noflush(page);

		if (ret)
			pr_warn_once("Failed to remap page\n");
	} else {
		debug_pagealloc_map_pages(page, 1);
	}
}

static inline void hibernate_unmap_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
		unsigned long addr = (unsigned long)page_address(page);
		int ret = set_direct_map_invalid_noflush(page);

		if (ret)
			pr_warn_once("Failed to remap page\n");

		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	} else {
		debug_pagealloc_unmap_pages(page, 1);
	}
}

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
}
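
/*
 * Illustrative arithmetic (not part of the kernel source): with 4 KiB
 * pages, a machine with 8 GiB of RAM has totalram_pages() = 2097152, so
 * the default above is image_size = (2097152 * 2 / 5) * 4096, roughly
 * 3.2 GiB, before any override via /sys/power/image_size.
 */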

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;
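
/*
 * Worked example (illustrative only): on a 64-bit system with 4 KiB
 * pages, LINKED_PAGE_DATA_SIZE is 4096 - 8 = 4088 bytes, so each page
 * in a chain stores one 8-byte link plus 4088 bytes of payload.
 */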

/*
 * List of "safe" pages (i.e. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
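
/*
 * Typical usage (a minimal sketch, not part of the kernel source; the
 * same pattern appears in memory_bm_create() below):
 *
 *	struct chain_allocator ca;
 *	struct rtree_node *node;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	node = chain_alloc(&ca, sizeof(struct rtree_node));
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);
 *
 * Note that chain_init() sets used_space to LINKED_PAGE_DATA_SIZE, which
 * forces the very first chain_alloc() to grab a fresh page.
 */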

/*
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects.  The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone.  For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent each block of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits.  Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the pfns that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits. There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
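
/*
 * Illustrative arithmetic (assuming 4 KiB pages, not part of the kernel
 * source): BM_BITS_PER_BLOCK = 4096 * 8 = 32768, so one bitmap page
 * covers 32768 consecutive PFNs, and BM_BLOCK_SHIFT = 12 + 3 = 15
 * because 2^15 = 32768.
 */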

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	unsigned long cur_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
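
/*
 * Illustrative arithmetic (assuming 4 KiB pages, not part of the kernel
 * source): on 64-bit, BM_ENTRIES_PER_LEVEL = 4096 / 8 = 512 pointers per
 * node, hence BM_RTREE_LEVEL_SHIFT = 12 - 3 = 9 (2^9 = 512); on 32-bit
 * it is 4096 / 4 = 1024 entries and a shift of 10.
 */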

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages not used before hibernation (restore only)
 * @ca: Pointer to a linked list of pages ("a chain") to allocate from
 * @list: Radix Tree node to add.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order. This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}
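
/*
 * Worked example (illustrative only, assuming 64-bit and 4 KiB pages,
 * i.e. BM_RTREE_LEVEL_SHIFT = 9): inserting block_nr = 600 requires two
 * levels, since 600 >> 9 = 1 is still non-zero while 1 >> 9 = 0.  The
 * walk above then uses index 600 >> 9 = 1 at the inner level and
 * 600 & 511 = 88 at the leaf level.
 */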

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.cur_pfn = BM_END_OF_MAP;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}
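
/*
 * Worked example (illustrative PFN values, not part of the kernel
 * source): if one populated zone spans PFNs [0, 4096) and another spans
 * [2048, 8192), the loop above merges them into a single extent
 * [0, 8192), so one mem_zone_bm_rtree covers both; a third zone at
 * [16384, 20480) would get its own extent.
 */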

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone. Now walk the radix tree to find the leaf node
	 * for our PFN.
	 */

	/*
	 * If the zone we wish to scan is the current zone and the
	 * pfn falls into the current node then we do not need to walk
	 * the tree.
	 */
	node = bm->cur.node;
	if (zone == bm->cur.zone &&
	    ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
	bm->cur.cur_pfn = pfn;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static unsigned long memory_bm_get_current(struct memory_bitmap *bm)
{
	return bm->cur.cur_pfn;
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, go to the next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
				  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it.  If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			bm->cur.cur_pfn = pfn;
			return pfn;
		}
	} while (rtree_next_node(bm));

	bm->cur.cur_pfn = BM_END_OF_MAP;
	return BM_END_OF_MAP;
}
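
/*
 * Typical iteration pattern (a minimal sketch; compare
 * clear_or_poison_free_pages() and swsusp_free() below):
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	pfn = memory_bm_next_pfn(bm);
 *	while (pfn != BM_END_OF_MAP) {
 *		... use pfn ...
 *		pfn = memory_bm_next_pfn(bm);
 *	}
 */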

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}

/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
					struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	/* This allocation cannot fail */
	region = memblock_alloc(sizeof(struct nosave_region),
				SMP_CACHE_BYTES);
	if (!region)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(struct nosave_region));
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("Basic memory bitmaps freed\n");
}

static void clear_or_poison_free_page(struct page *page)
{
	if (page_poisoning_enabled_static())
		__kernel_poison_pages(page, 1);
	else if (want_init_on_free())
		clear_highpage(page);
}

void clear_or_poison_free_pages(void)
{
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	if (page_poisoning_enabled() || want_init_on_free()) {
		memory_bm_position_reset(bm);
		pfn = memory_bm_next_pfn(bm);
		while (pfn != BM_END_OF_MAP) {
			if (pfn_valid(pfn))
				clear_or_poison_free_page(pfn_to_page(pfn));

			pfn = memory_bm_next_pfn(bm);
		}
		memory_bm_position_reset(bm);
		pr_info("free pages cleared after restore\n");
	}
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the
 * hibernation image data structures for @zone (usually, the returned value is
 * greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
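
/*
 * Worked example (illustrative only, assuming 64-bit, 4 KiB pages and a
 * 24-byte struct rtree_node): for a zone spanning 262144 pages (1 GiB),
 * rtree = nodes = DIV_ROUND_UP(262144, 32768) = 8 leaf pages; the
 * rtree_node bookkeeping adds DIV_ROUND_UP(8 * 24, 4088) = 1 page and
 * one inner level adds DIV_ROUND_UP(8, 512) = 1 more, so rtree = 10 and
 * the estimate is 2 * 10 = 20 pages (doubled, presumably to allow for a
 * second bitmap of the same shape).
 */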

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page) || PageOffline(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageOffline(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed because copy_page() and memcpy() are not usable for copying
 * task structs.  Returns true if the page was filled with only zeros,
 * otherwise false.
 */
static inline bool do_copy_page(long *dst, long *src)
{
	long z = 0;
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--) {
		z |= *src;
		*dst++ = *src++;
	}
	return !z;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
 * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
 * always returns 'true'. Returns true if the page was entirely composed of
 * zeros, otherwise it will return false.
 */
static bool safe_copy_page(void *dst, struct page *s_page)
{
	bool zeros_only;

	if (kernel_page_present(s_page)) {
		zeros_only = do_copy_page(dst, page_address(s_page));
	} else {
		hibernate_map_page(s_page);
		zeros_only = do_copy_page(dst, page_address(s_page));
		hibernate_unmap_page(s_page);
	}
	return zeros_only;
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static bool copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;
	bool zeros_only;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		zeros_only = do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			zeros_only = safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			zeros_only = safe_copy_page(page_address(d_page), s_page);
		}
	}
	return zeros_only;
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline int copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	return safe_copy_page(page_address(pfn_to_page(dst_pfn)),
				pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

/*
 * copy_data_pages() copies all saveable pages into the page frames pulled
 * from the copy_bm bitmap.  If a page was entirely filled with zeros, it is
 * marked in the zero_bm bitmap instead.
 *
 * Returns the number of pages copied.
 */
static unsigned long copy_data_pages(struct memory_bitmap *copy_bm,
				     struct memory_bitmap *orig_bm,
				     struct memory_bitmap *zero_bm)
{
	unsigned long copied_pages = 0;
	struct zone *zone;
	unsigned long pfn, copy_pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	copy_pfn = memory_bm_next_pfn(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		if (copy_data_page(copy_pfn, pfn)) {
			memory_bm_set_bit(zero_bm, pfn);
			/* Use this copy_pfn for a page that is not full of zeros */
			continue;
		}
		copied_pages++;
		copy_pfn = memory_bm_next_pfn(copy_bm);
	}
	return copied_pages;
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/* Number of zero pages */
static unsigned int nr_zero_pages;

/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages.  During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/* Memory bitmap which tracks which saveable pages were zero filled. */
static struct memory_bitmap zero_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps. This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	nr_zero_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	return div64_u64(x * multiplier, base);
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
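
/*
 * Illustrative arithmetic (made-up numbers, not from the kernel source):
 * with nr_pages = 1000, highmem = 256 and total = 1024, the fraction
 * above allocates div64_u64(1000 * 256, 1024) = 250 highmem pages, i.e.
 * the preallocation is split in proportion to the highmem share of
 * memory.
 */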
1662 #else /* CONFIG_HIGHMEM */
preallocate_image_highmem(unsigned long nr_pages)1663 static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1664 {
1665 	return 0;
1666 }
1667 
preallocate_highmem_fraction(unsigned long nr_pages,unsigned long highmem,unsigned long total)1668 static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1669 							 unsigned long highmem,
1670 							 unsigned long total)
1671 {
1672 	return 0;
1673 }
1674 #endif /* CONFIG_HIGHMEM */
1675 
1676 /**
1677  * free_unnecessary_pages - Release preallocated pages not needed for the image.
1678  */
free_unnecessary_pages(void)1679 static unsigned long free_unnecessary_pages(void)
1680 {
1681 	unsigned long save, to_free_normal, to_free_highmem, free;
1682 
1683 	save = count_data_pages();
1684 	if (alloc_normal >= save) {
1685 		to_free_normal = alloc_normal - save;
1686 		save = 0;
1687 	} else {
1688 		to_free_normal = 0;
1689 		save -= alloc_normal;
1690 	}
1691 	save += count_highmem_pages();
1692 	if (alloc_highmem >= save) {
1693 		to_free_highmem = alloc_highmem - save;
1694 	} else {
1695 		to_free_highmem = 0;
1696 		save -= alloc_highmem;
1697 		if (to_free_normal > save)
1698 			to_free_normal -= save;
1699 		else
1700 			to_free_normal = 0;
1701 	}
1702 	free = to_free_normal + to_free_highmem;
1703 
1704 	memory_bm_position_reset(&copy_bm);
1705 
1706 	while (to_free_normal > 0 || to_free_highmem > 0) {
1707 		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1708 		struct page *page = pfn_to_page(pfn);
1709 
1710 		if (PageHighMem(page)) {
1711 			if (!to_free_highmem)
1712 				continue;
1713 			to_free_highmem--;
1714 			alloc_highmem--;
1715 		} else {
1716 			if (!to_free_normal)
1717 				continue;
1718 			to_free_normal--;
1719 			alloc_normal--;
1720 		}
1721 		memory_bm_clear_bit(&copy_bm, pfn);
1722 		swsusp_unset_page_forbidden(page);
1723 		swsusp_unset_page_free(page);
1724 		__free_page(page);
1725 	}
1726 
1727 	return free;
1728 }
1729 
/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We do not want to try too hard to free memory, so estimate the minimum
 * acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE);

	return saveable <= size ? 0 : saveable - size;
}
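
/*
 * For instance, with 400000 saveable pages and 150000 pages of reclaimable
 * slab, anonymous and file memory combined, the estimated minimum image
 * size is 250000 pages; if the reclaimable total exceeded the saveable
 * count, the estimate would simply be 0.
 */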

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use.  We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
 * estimate, and by reserved_size divided by PAGE_SIZE, which is tunable
 * through /sys/power/reserved_size, respectively).  To make this happen, we
 * compute the total number of available page frames and allocate at least
 *
 * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
 *  - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	pr_info("Preallocating image memory\n");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error) {
		pr_err("Cannot allocate original bitmap\n");
		goto err_out;
	}

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error) {
		pr_err("Cannot allocate copy bitmap\n");
		goto err_out;
	}

	error = memory_bm_create(&zero_bm, GFP_IMAGE, PG_ANY);
	if (error) {
		pr_err("Cannot allocate zero bitmap\n");
		goto err_out;
	}

	alloc_normal = 0;
	alloc_highmem = 0;
	nr_zero_pages = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames, and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it.  First, make room for the largest possible
	 * image and fail if that doesn't work.  Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc) {
			pr_err("Image allocation is %lu pages short\n",
				alloc - pages_highmem);
			goto err_out;
		}
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more.  Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	pr_info("Allocated %lu pages for snapshot\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}
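
/*
 * Worked example of the sizing logic above (illustrative figures, not
 * measurements): with count = 500000 usable page frames, 8000 metadata
 * pages and no extra reservation, max_size comes out near
 * (500000 - 8000 - PAGES_FOR_IO) / 2, i.e. a little under half of the
 * usable page frames are left for saveable pages while the rest can be
 * preallocated for their copies.
 */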

#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for creating
 * copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}

#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system.  If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */
static int swsusp_alloc(struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}

asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	pr_info("Creating image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		pr_err("Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * While allocating the suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	nr_copy_pages = copy_data_pages(&copy_bm, &orig_bm, &zero_bm);

	/*
	 * End of critical section.  From now on, we can write to memory,
	 * but we should not touch disk.  This especially means we must _not_
	 * touch swap space!  Except we must write out our image of course.
	 */
	nr_pages += nr_highmem;
	/* We don't actually copy the zero pages */
	nr_zero_pages = nr_pages - nr_copy_pages;
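	/*
	 * Each image page is described by one packed PFN of sizeof(long)
	 * bytes, so the metadata takes nr_pages * sizeof(long) bytes,
	 * rounded up to whole pages.
	 */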
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	pr_info("Image created (%d pages copied, %d zero pages)\n", nr_copy_pages, nr_zero_pages);

	return 0;
}

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static const char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

unsigned long snapshot_get_image_size(void)
{
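	/* Image data pages, plus the metadata pages, plus one header page. */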
	return nr_copy_pages + nr_meta_pages + 1;
}
EXPORT_SYMBOL_GPL(snapshot_get_image_size);

static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}

#define ENCODED_PFN_ZERO_FLAG ((unsigned long)1 << (BITS_PER_LONG - 1))
#define ENCODED_PFN_MASK (~ENCODED_PFN_ZERO_FLAG)
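
/*
 * For example, on a 64-bit kernel a zero-filled page with PFN 0x1234 is
 * stored in the metadata as 0x8000000000001234; masking with
 * ENCODED_PFN_MASK recovers the original PFN, and the top bit tells the
 * reader that no data page follows for it in the image.
 */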

/**
 * pack_pfns - Prepare PFNs for saving.
 * @bm: Memory bitmap.
 * @buf: Memory buffer to store the PFNs in.
 * @zero_bm: Memory bitmap containing PFNs of zero pages.
 *
 * PFNs corresponding to set bits in @bm are stored in the area of memory
 * pointed to by @buf (1 page at a time). Pages which were filled with only
 * zeros will have the highest bit set in the packed format to distinguish
 * them from PFNs which will be contained in the image file.
 */
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm,
		struct memory_bitmap *zero_bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		if (memory_bm_test_bit(zero_bm, buf[j]))
			buf[j] |= ENCODED_PFN_ZERO_FLAG;
	}
}

/**
 * snapshot_read_next - Get the address to read the next image page from.
 * @handle: Snapshot handle to be used for the reading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function on each subsequent call.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream, and negative
 * numbers are returned on errors.  If that happens, the structure pointed to
 * by @handle is not updated and should not be used any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm, &zero_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}
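
/*
 * A minimal caller sketch (illustrative only; write_page() is a
 * hypothetical helper and error handling is elided):
 *
 *	int ret;
 *
 *	while ((ret = snapshot_read_next(&handle)) > 0)
 *		write_page(data_of(handle), ret);
 *
 * A return value of 0 marks the end of the image, a negative value an error.
 */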

static void duplicate_memory_bitmap(struct memory_bitmap *dst,
				    struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}

/**
 * mark_unsafe_pages - Mark pages that were used before hibernation.
 *
 * Mark the pages that cannot be used for storing the image during restoration,
 * because they conflict with the pages that had been used before hibernation.
 */
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;

	/* Clear the "free"/"unsafe" bit for all PFNs */
	memory_bm_position_reset(free_pages_map);
	pfn = memory_bm_next_pfn(free_pages_map);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_clear_current(free_pages_map);
		pfn = memory_bm_next_pfn(free_pages_map);
	}

	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
	duplicate_memory_bitmap(free_pages_map, bm);

	allocated_unsafe_pages = 0;
}

static int check_header(struct swsusp_info *info)
{
	const char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		pr_err("Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 * load_header - Check the image header and copy the data from it.
 */
static int load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
 * @bm: Memory bitmap.
 * @buf: Area of memory containing the PFNs.
 * @zero_bm: Memory bitmap with the zero PFNs marked.
 *
 * For each element of the array pointed to by @buf (1 page at a time), set the
 * corresponding bit in @bm. If the page was originally populated with only
 * zeros then a corresponding bit will also be set in @zero_bm.
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm,
		struct memory_bitmap *zero_bm)
{
	unsigned long decoded_pfn;
	bool zero;
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		zero = !!(buf[j] & ENCODED_PFN_ZERO_FLAG);
		decoded_pfn = buf[j] & ENCODED_PFN_MASK;
		if (pfn_valid(decoded_pfn) && memory_bm_pfn_present(bm, decoded_pfn)) {
			memory_bm_set_bit(bm, decoded_pfn);
			if (zero) {
				memory_bm_set_bit(zero_bm, decoded_pfn);
				nr_zero_pages++;
			}
		} else {
			if (!pfn_valid(decoded_pfn))
				pr_err(FW_BUG "Memory map mismatch at 0x%llx after hibernation\n",
				       (unsigned long long)PFN_PHYS(decoded_pfn));
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;

/**
 * count_highmem_image_pages - Compute the number of highmem pages in the image.
 * @bm: Memory bitmap.
 *
 * The bits in @bm that correspond to image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

/**
 * prepare_highmem_image - Allocate memory for loading highmem data from image.
 * @bm: Pointer to an uninitialized memory bitmap structure.
 * @nr_highmem_p: Pointer to the number of highmem image pages.
 *
 * Try to allocate as many highmem pages as there are highmem image pages
 * (@nr_highmem_p points to the variable containing the number of highmem image
 * pages).  The pages that are "safe" (ie. will not be overwritten when the
 * hibernation image is restored entirely) have the corresponding bits set in
 * @bm (it must be uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem image pages.
 */
static int prepare_highmem_image(struct memory_bitmap *bm,
				 unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}

static struct page *last_highmem_page;

/**
 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
 *
 * For a given highmem image page get a buffer that suspend_write_next() should
 * return to its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in highmem, @buffer is returned.  Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of suspend_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to suspend_write_next() and it is done
 * with the help of copy_last_highmem_page().  For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */
static void *get_highmem_page_buffer(struct page *page,
				     struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = __get_safe_page(ca->gfp_mask);
		if (!kaddr)
			return ERR_PTR(-ENOMEM);
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}

/**
 * copy_last_highmem_page - Copy the most recent highmem image page.
 *
 * Copy the contents of a highmem image from @buffer, where the caller of
 * snapshot_write_next() has stored them, to the right location represented by
 * @last_highmem_page.
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int prepare_highmem_image(struct memory_bitmap *bm,
					unsigned int *nr_highmem_p) { return 0; }

static inline void *get_highmem_page_buffer(struct page *page,
					    struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */

#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
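
/*
 * As a rough illustration: with 4 KiB pages, LINKED_PAGE_DATA_SIZE is one
 * page minus the size of the 'next' pointer, and struct pbe holds two
 * pointers plus a 'next' pointer (24 bytes on 64-bit), so each linked page
 * can carry roughly 170 PBEs.
 */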

/**
 * prepare_image - Make room for loading hibernation image.
 * @new_bm: Uninitialized memory bitmap structure.
 * @bm: Memory bitmap with unsafe pages marked.
 * @zero_bm: Memory bitmap containing the zero pages.
 *
 * Use @bm to mark the pages that will be overwritten in the process of
 * restoring the system memory state from the suspend image ("unsafe" pages)
 * and allocate memory for the image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for image data, but without specifying what those
 * pages will be used for just yet.  Instead, we mark them all as allocated and
 * create a list of "safe" pages to be used later.  On systems with high
 * memory a list of "safe" highmem pages is created too.
 *
 * Because it was not known which pages were unsafe when @zero_bm was created,
 * make a copy of it and recreate it within safe pages.
 */
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm,
		struct memory_bitmap *zero_bm)
{
	unsigned int nr_pages, nr_highmem;
	struct memory_bitmap tmp;
	struct linked_page *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	mark_unsafe_pages(bm);

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);

	/* Make a copy of zero_bm so it can be created in safe pages */
	error = memory_bm_create(&tmp, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(&tmp, zero_bm);
	memory_bm_free(zero_bm, PG_UNSAFE_KEEP);

	/* Recreate zero_bm in safe pages */
	error = memory_bm_create(zero_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(zero_bm, &tmp);
	memory_bm_free(&tmp, PG_UNSAFE_CLEAR);
	/* At this point zero_bm is in safe pages and it can be used for restoring. */

	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/*
	 * Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 *
	 * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
	 */
	nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = safe_pages_list;
		safe_pages_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}

/**
 * get_buffer - Get the address to store the next image data page.
 *
 * Get the address that snapshot_write_next() should return to its caller to
 * write to.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = __get_safe_page(ca->gfp_mask);
	if (!pbe->address)
		return ERR_PTR(-ENOMEM);
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}

/**
 * snapshot_write_next - Get the address to store the next image page.
 * @handle: Snapshot handle structure to guide the writing.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function on each subsequent call.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition.  Negative
 * numbers are returned on errors, in which case the structure pointed to by
 * @handle is not updated and should not be used any more.
 */
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

next:
	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages)
		return 0;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		safe_pages_list = NULL;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		error = memory_bm_create(&zero_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		nr_zero_pages = 0;

		hibernate_restore_protection_begin();
	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm, &zero_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm, &zero_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			memory_bm_position_reset(&zero_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		hibernate_restore_protect_page(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
	}
	handle->sync_read = (handle->buffer == buffer);
	handle->cur++;

	/* Zero pages were not included in the image; clear the buffer and move on. */
	if (handle->cur > nr_meta_pages + 1 &&
	    memory_bm_test_bit(&zero_bm, memory_bm_get_current(&orig_bm))) {
		memset(handle->buffer, 0, PAGE_SIZE);
		goto next;
	}

	return PAGE_SIZE;
}
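
/*
 * A minimal caller sketch (illustrative only; read_page() is a hypothetical
 * helper and error handling is elided):
 *
 *	int ret;
 *
 *	while ((ret = snapshot_write_next(&handle)) > 0)
 *		read_page(data_of(handle), ret);
 *	snapshot_write_finalize(&handle);
 *
 * The loop ends when 0 ("end of file") or a negative error is returned.
 */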

/**
 * snapshot_write_finalize - Complete the loading of a hibernation image.
 *
 * Must be called after the last call to snapshot_write_next() in case the last
 * page in the image happens to be a highmem page and its contents should be
 * stored in highmem.  Additionally, it recycles bitmap memory that's not
 * necessary any more.
 */
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	hibernate_restore_protect_page(handle->buffer);
	/* Do that only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages) {
		memory_bm_recycle(&orig_bm);
		free_highmem_data();
	}
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
			handle->cur <= nr_meta_pages + nr_copy_pages + nr_zero_pages);
}

#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void swap_two_pages_data(struct page *p1, struct page *p2,
				       void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}

/**
 * restore_highmem - Put highmem image pages into their original locations.
 *
 * For each highmem page that was in use before hibernation and is included in
 * the image, and also has been allocated by the "restore" kernel, swap its
 * current contents with the previous (ie. "before hibernation") ones.
 *
 * If the restore eventually fails, we can call this function once again and
 * restore the highmem state as seen by the restore kernel.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */