1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/kernel/power/swap.c
4  *
5  * This file provides functions for reading the suspend image from
6  * and writing it to a swap partition.
7  *
8  * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
9  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
10  * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
11  */
12 
13 #define pr_fmt(fmt) "PM: " fmt
14 
15 #include <linux/module.h>
16 #include <linux/file.h>
17 #include <linux/delay.h>
18 #include <linux/bitops.h>
19 #include <linux/device.h>
20 #include <linux/bio.h>
21 #include <linux/blkdev.h>
22 #include <linux/swap.h>
23 #include <linux/swapops.h>
24 #include <linux/pm.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/cpumask.h>
28 #include <linux/atomic.h>
29 #include <linux/kthread.h>
30 #include <linux/crc32.h>
31 #include <linux/ktime.h>
32 #include <trace/hooks/bl_hib.h>
33 
34 #include "power.h"
35 
36 #define HIBERNATE_SIG	"S1SUSPEND"
37 
38 u32 swsusp_hardware_signature;
39 
40 /*
41  * When reading an {un,}compressed image, we may restore pages in place,
42  * in which case some architectures need these pages to be cleaned before
43  * they can be executed. We don't know which pages these may be, so clean them all.
44  */
45 static bool clean_pages_on_read;
46 static bool clean_pages_on_decompress;
47 
48 /*
49  *	The swap map is a data structure used for keeping track of each page
50  *	written to a swap partition.  It consists of many swap_map_page
51  *	structures, each of which contains an array of MAP_PAGE_ENTRIES swap entries.
52  *	These structures are stored on the swap and linked together with the
53  *	help of the .next_swap member.
54  *
55  *	The swap map is created during suspend.  The swap map pages are
56  *	allocated and populated one at a time, so we only need one memory
57  *	page to set up the entire structure.
58  *
59  *	During resume we read all swap_map_page structures into a list.
60  */
61 
62 #define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
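/*
 * For example, assuming 4 KiB pages and an 8-byte sector_t (both are
 * common but configuration-dependent), MAP_PAGE_ENTRIES = 4096 / 8 - 1 =
 * 511, so each swap_map_page records 511 image-page locations plus the
 * .next_swap link to the following map page.
 */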
63 
64 /*
65  * Number of free pages that are not high.
66  */
67 static inline unsigned long low_free_pages(void)
68 {
69 	return nr_free_pages() - nr_free_highpages();
70 }
71 
72 /*
73  * Number of pages required to be kept free while writing the image. Always
74  * half of all available low pages before the writing starts.
75  */
76 static inline unsigned long reqd_free_pages(void)
77 {
78 	return low_free_pages() / 2;
79 }
80 
81 struct swap_map_page {
82 	sector_t entries[MAP_PAGE_ENTRIES];
83 	sector_t next_swap;
84 };
85 
86 struct swap_map_page_list {
87 	struct swap_map_page *map;
88 	struct swap_map_page_list *next;
89 };
90 
91 /*
92  *	The swap_map_handle structure is used for handling swap in
93  *	a file-like way.
94  */
95 
96 struct swap_map_handle {
97 	struct swap_map_page *cur;
98 	struct swap_map_page_list *maps;
99 	sector_t cur_swap;
100 	sector_t first_sector;
101 	unsigned int k;
102 	unsigned long reqd_free_pages;
103 	u32 crc32;
104 };
105 
106 struct swsusp_header {
107 	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
108 	              sizeof(u32) - sizeof(u32)];
109 	u32	hw_sig;
110 	u32	crc32;
111 	sector_t image;
112 	unsigned int flags;	/* Flags to pass to the "boot" kernel */
113 	char	orig_sig[10];
114 	char	sig[10];
115 } __packed;
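/*
 * The reserved[] member sizes the structure to exactly PAGE_SIZE bytes,
 * pushing the signature fields to the very end of the first page of the
 * resume area. For example, assuming 4 KiB pages and an 8-byte sector_t,
 * reserved[] is 4096 - 20 - 8 - 4 - 4 - 4 = 4056 bytes.
 */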
116 
117 static struct swsusp_header *swsusp_header;
118 
119 /*
120  *	The following functions are used for tracking the allocated
121  *	swap pages, so that they can be freed in case of an error.
122  */
123 
124 struct swsusp_extent {
125 	struct rb_node node;
126 	unsigned long start;
127 	unsigned long end;
128 };
129 
130 static struct rb_root swsusp_extents = RB_ROOT;
131 
132 static int swsusp_extents_insert(unsigned long swap_offset)
133 {
134 	struct rb_node **new = &(swsusp_extents.rb_node);
135 	struct rb_node *parent = NULL;
136 	struct swsusp_extent *ext;
137 
138 	/* Figure out where to put the new node */
139 	while (*new) {
140 		ext = rb_entry(*new, struct swsusp_extent, node);
141 		parent = *new;
142 		if (swap_offset < ext->start) {
143 			/* Try to merge */
144 			if (swap_offset == ext->start - 1) {
145 				ext->start--;
146 				return 0;
147 			}
148 			new = &((*new)->rb_left);
149 		} else if (swap_offset > ext->end) {
150 			/* Try to merge */
151 			if (swap_offset == ext->end + 1) {
152 				ext->end++;
153 				return 0;
154 			}
155 			new = &((*new)->rb_right);
156 		} else {
157 			/* It already is in the tree */
158 			return -EINVAL;
159 		}
160 	}
161 	/* Add the new node and rebalance the tree. */
162 	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
163 	if (!ext)
164 		return -ENOMEM;
165 
166 	ext->start = swap_offset;
167 	ext->end = swap_offset;
168 	rb_link_node(&ext->node, parent, new);
169 	rb_insert_color(&ext->node, &swsusp_extents);
170 	return 0;
171 }
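/*
 * To illustrate the merge logic above: inserting offsets 10, 11 and 12 in
 * order grows a single extent [10, 12], because each new offset is
 * adjacent to an existing boundary; inserting 10 again would return
 * -EINVAL since it already lies inside an extent. Two extents that become
 * adjacent to each other are not coalesced.
 */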
172 
173 /*
174  *	alloc_swapdev_block - allocate a swap page and register that it has
175  *	been allocated, so that it can be freed in case of an error.
176  */
177 
178 sector_t alloc_swapdev_block(int swap)
179 {
180 	unsigned long offset;
181 
182 	offset = swp_offset(get_swap_page_of_type(swap));
183 	if (offset) {
184 		if (swsusp_extents_insert(offset))
185 			swap_free(swp_entry(swap, offset));
186 		else
187 			return swapdev_block(swap, offset);
188 	}
189 	return 0;
190 }
191 EXPORT_SYMBOL_GPL(alloc_swapdev_block);
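/*
 * Offset 0 can serve as the "no space" return value above because the
 * first slot of a swap area holds the swap header and is never handed out
 * by the allocator, so no image page can legitimately live there.
 */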
192 
193 /*
194  *	free_all_swap_pages - free swap pages allocated for saving image data.
195  *	It also frees the extents used to register which swap entries had been
196  *	allocated.
197  */
198 
199 void free_all_swap_pages(int swap)
200 {
201 	struct rb_node *node;
202 
203 	while ((node = swsusp_extents.rb_node)) {
204 		struct swsusp_extent *ext;
205 
206 		ext = rb_entry(node, struct swsusp_extent, node);
207 		rb_erase(node, &swsusp_extents);
208 		swap_free_nr(swp_entry(swap, ext->start),
209 			     ext->end - ext->start + 1);
210 
211 		kfree(ext);
212 	}
213 }
214 
215 int swsusp_swap_in_use(void)
216 {
217 	return (swsusp_extents.rb_node != NULL);
218 }
219 
220 /*
221  * General things
222  */
223 
224 static unsigned short root_swap = 0xffff;
225 static struct file *hib_resume_bdev_file;
226 
227 struct hib_bio_batch {
228 	atomic_t		count;
229 	wait_queue_head_t	wait;
230 	blk_status_t		error;
231 	struct blk_plug		plug;
232 };
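/*
 * The intended usage pattern, as followed by save_image() and
 * load_image() below, is roughly:
 *
 *	struct hib_bio_batch hb;
 *
 *	hib_init_batch(&hb);
 *	while (more pages to submit)
 *		hib_submit_io(opf, page_off, addr, &hb);
 *	error = hib_wait_io(&hb);	(waits for count to reach zero)
 *	hib_finish_batch(&hb);		(finishes the blk_plug)
 */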
233 
234 static void hib_init_batch(struct hib_bio_batch *hb)
235 {
236 	atomic_set(&hb->count, 0);
237 	init_waitqueue_head(&hb->wait);
238 	hb->error = BLK_STS_OK;
239 	blk_start_plug(&hb->plug);
240 }
241 
242 static void hib_finish_batch(struct hib_bio_batch *hb)
243 {
244 	blk_finish_plug(&hb->plug);
245 }
246 
247 static void hib_end_io(struct bio *bio)
248 {
249 	struct hib_bio_batch *hb = bio->bi_private;
250 	struct page *page = bio_first_page_all(bio);
251 
252 	if (bio->bi_status) {
253 		pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
254 			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
255 			 (unsigned long long)bio->bi_iter.bi_sector);
256 	}
257 
258 	if (bio_data_dir(bio) == WRITE)
259 		put_page(page);
260 	else if (clean_pages_on_read)
261 		flush_icache_range((unsigned long)page_address(page),
262 				   (unsigned long)page_address(page) + PAGE_SIZE);
263 
264 	if (bio->bi_status && !hb->error)
265 		hb->error = bio->bi_status;
266 	if (atomic_dec_and_test(&hb->count))
267 		wake_up(&hb->wait);
268 
269 	bio_put(bio);
270 }
271 
272 static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr,
273 			 struct hib_bio_batch *hb)
274 {
275 	struct page *page = virt_to_page(addr);
276 	struct bio *bio;
277 	int error = 0;
278 
279 	bio = bio_alloc(file_bdev(hib_resume_bdev_file), 1, opf,
280 			GFP_NOIO | __GFP_HIGH);
281 	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
282 
283 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
284 		pr_err("Adding page to bio failed at %llu\n",
285 		       (unsigned long long)bio->bi_iter.bi_sector);
286 		bio_put(bio);
287 		return -EFAULT;
288 	}
289 
290 	if (hb) {
291 		bio->bi_end_io = hib_end_io;
292 		bio->bi_private = hb;
293 		atomic_inc(&hb->count);
294 		submit_bio(bio);
295 	} else {
296 		error = submit_bio_wait(bio);
297 		bio_put(bio);
298 	}
299 
300 	return error;
301 }
302 
303 static int hib_wait_io(struct hib_bio_batch *hb)
304 {
305 	/*
306 	 * We are relying on the behavior of blk_plug that a thread with
307 	 * a plug will flush the plug list before sleeping.
308 	 */
309 	wait_event(hb->wait, atomic_read(&hb->count) == 0);
310 	return blk_status_to_errno(hb->error);
311 }
312 
313 /*
314  * Saving part
315  */
316 static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
317 {
318 	int error;
319 
320 	hib_submit_io(REQ_OP_READ, swsusp_resume_block, swsusp_header, NULL);
321 	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
322 	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
323 		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
324 		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
325 		swsusp_header->image = handle->first_sector;
326 		if (swsusp_hardware_signature) {
327 			swsusp_header->hw_sig = swsusp_hardware_signature;
328 			flags |= SF_HW_SIG;
329 		}
330 		swsusp_header->flags = flags;
331 		if (flags & SF_CRC32_MODE)
332 			swsusp_header->crc32 = handle->crc32;
333 		error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
334 				      swsusp_resume_block, swsusp_header, NULL);
335 	} else {
336 		pr_err("Swap header not found!\n");
337 		error = -ENODEV;
338 	}
339 	return error;
340 }
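/*
 * This is the signature handshake used at resume time: the swap area's own
 * signature ("SWAP-SPACE" or "SWAPSPACE2") is preserved in orig_sig and
 * overwritten with HIBERNATE_SIG; swsusp_check() looks for HIBERNATE_SIG
 * and writes the original signature back, so an image is consumed at most
 * once.
 */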
341 
342 /*
343  * Hold the swsusp_header flags. These are used in software_resume() in
344  * 'kernel/power/hibernate' to check whether the image is compressed and,
345  * if so, to query for support of the compression algorithm.
346  */
347 unsigned int swsusp_header_flags;
348 
349 /**
350  *	swsusp_swap_check - check if the resume device is a swap device
351  *	and get its index (if so)
352  *
353  *	This is called before saving the image.
354  */
355 static int swsusp_swap_check(void)
356 {
357 	int res;
358 
359 	if (swsusp_resume_device)
360 		res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
361 	else
362 		res = find_first_swap(&swsusp_resume_device);
363 	if (res < 0)
364 		return res;
365 	root_swap = res;
366 
367 	hib_resume_bdev_file = bdev_file_open_by_dev(swsusp_resume_device,
368 			BLK_OPEN_WRITE, NULL, NULL);
369 	if (IS_ERR(hib_resume_bdev_file))
370 		return PTR_ERR(hib_resume_bdev_file);
371 
372 	return 0;
373 }
374 
375 /**
376  *	write_page - Write one page to given swap location.
377  *	@buf:		Address we're writing.
378  *	@offset:	Offset of the swap page we're writing to.
379  *	@hb:		bio completion batch
380  */
381 
382 static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
383 {
384 	void *src;
385 	int ret;
386 
387 	if (!offset)
388 		return -ENOSPC;
389 
390 	if (hb) {
391 		src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
392 		                              __GFP_NORETRY);
393 		if (src) {
394 			copy_page(src, buf);
395 		} else {
396 			ret = hib_wait_io(hb); /* Free pages */
397 			if (ret)
398 				return ret;
399 			src = (void *)__get_free_page(GFP_NOIO |
400 			                              __GFP_NOWARN |
401 			                              __GFP_NORETRY);
402 			if (src) {
403 				copy_page(src, buf);
404 			} else {
405 				WARN_ON_ONCE(1);
406 				hb = NULL;	/* Go synchronous */
407 				src = buf;
408 			}
409 		}
410 	} else {
411 		src = buf;
412 	}
413 	return hib_submit_io(REQ_OP_WRITE | REQ_SYNC, offset, src, hb);
414 }
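/*
 * Note on the asynchronous path above: the caller may reuse buf before the
 * bio completes, so write_page() copies it into a private page first
 * (released by put_page() in hib_end_io()). If that allocation fails even
 * after draining in-flight I/O, the write falls back to a synchronous
 * submission of buf itself.
 */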
415 
416 static void release_swap_writer(struct swap_map_handle *handle)
417 {
418 	if (handle->cur)
419 		free_page((unsigned long)handle->cur);
420 	handle->cur = NULL;
421 }
422 
423 static int get_swap_writer(struct swap_map_handle *handle)
424 {
425 	int ret;
426 
427 	ret = swsusp_swap_check();
428 	if (ret) {
429 		if (ret != -ENOSPC)
430 			pr_err("Cannot find swap device, try swapon -a\n");
431 		return ret;
432 	}
433 	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
434 	if (!handle->cur) {
435 		ret = -ENOMEM;
436 		goto err_close;
437 	}
438 	handle->cur_swap = alloc_swapdev_block(root_swap);
439 	if (!handle->cur_swap) {
440 		ret = -ENOSPC;
441 		goto err_rel;
442 	}
443 	handle->k = 0;
444 	handle->reqd_free_pages = reqd_free_pages();
445 	handle->first_sector = handle->cur_swap;
446 	return 0;
447 err_rel:
448 	release_swap_writer(handle);
449 err_close:
450 	swsusp_close();
451 	return ret;
452 }
453 
454 static int swap_write_page(struct swap_map_handle *handle, void *buf,
455 		struct hib_bio_batch *hb)
456 {
457 	int error;
458 	sector_t offset;
459 	bool skip = false;
460 
461 	if (!handle->cur)
462 		return -EINVAL;
463 	offset = alloc_swapdev_block(root_swap);
464 	error = write_page(buf, offset, hb);
465 	if (error)
466 		return error;
467 	handle->cur->entries[handle->k++] = offset;
468 	if (handle->k >= MAP_PAGE_ENTRIES) {
469 		offset = alloc_swapdev_block(root_swap);
470 		if (!offset)
471 			return -ENOSPC;
472 		handle->cur->next_swap = offset;
473 		trace_android_vh_skip_swap_map_write(&skip);
474 		if (!skip) {
475 			error = write_page(handle->cur, handle->cur_swap, hb);
476 			if (error)
477 				goto out;
478 		}
479 		clear_page(handle->cur);
480 		handle->cur_swap = offset;
481 		handle->k = 0;
482 
483 		if (hb && low_free_pages() <= handle->reqd_free_pages) {
484 			error = hib_wait_io(hb);
485 			if (error)
486 				goto out;
487 			/*
488 			 * Recalculate the number of required free pages, to
489 			 * make sure we never take more than half.
490 			 */
491 			handle->reqd_free_pages = reqd_free_pages();
492 		}
493 	}
494  out:
495 	return error;
496 }
497 
498 static int flush_swap_writer(struct swap_map_handle *handle)
499 {
500 	if (handle->cur && handle->cur_swap)
501 		return write_page(handle->cur, handle->cur_swap, NULL);
502 	else
503 		return -EINVAL;
504 }
505 
506 static int swap_writer_finish(struct swap_map_handle *handle,
507 		unsigned int flags, int error)
508 {
509 	if (!error) {
510 		pr_info("S");
511 		error = mark_swapfiles(handle, flags);
512 		pr_cont("|\n");
513 		flush_swap_writer(handle);
514 	}
515 
516 	if (error)
517 		free_all_swap_pages(root_swap);
518 	release_swap_writer(handle);
519 	swsusp_close();
520 
521 	return error;
522 }
523 
524 /*
525  * Bytes we need for compressed data in the worst case. We assume (as a
526  * limitation) this is the worst of all the compression algorithms.
527  */
528 #define bytes_worst_compress(x) ((x) + ((x) / 16) + 64 + 3 + 2)
529 
530 /* We need to remember how much compressed data we need to read. */
531 #define CMP_HEADER	sizeof(size_t)
532 
533 /* Number of pages/bytes we'll compress at one time. */
534 #define UNC_PAGES	32
535 #define UNC_SIZE	(UNC_PAGES * PAGE_SIZE)
536 
537 /* Number of pages we need for compressed data (worst case). */
538 #define CMP_PAGES	DIV_ROUND_UP(bytes_worst_compress(UNC_SIZE) + \
539 				CMP_HEADER, PAGE_SIZE)
540 #define CMP_SIZE	(CMP_PAGES * PAGE_SIZE)
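/*
 * A worked example of the sizing above, assuming 4 KiB pages and an 8-byte
 * size_t: UNC_SIZE = 32 * 4096 = 131072 bytes,
 * bytes_worst_compress(UNC_SIZE) = 131072 + 8192 + 64 + 3 + 2 = 139333,
 * and with the CMP_HEADER length prefix added,
 * CMP_PAGES = DIV_ROUND_UP(139341, 4096) = 35, i.e. CMP_SIZE = 143360.
 * On disk, each compressed block is a size_t length followed by that many
 * bytes of compressed data, padded out to whole pages.
 */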
541 
542 /* Maximum number of threads for compression/decompression. */
543 #define CMP_THREADS	3
544 
545 /* Minimum/maximum number of pages for read buffering. */
546 #define CMP_MIN_RD_PAGES	1024
547 #define CMP_MAX_RD_PAGES	8192
548 
549 /**
550  *	save_image - save the suspend image data
551  */
552 
553 static int save_image(struct swap_map_handle *handle,
554                       struct snapshot_handle *snapshot,
555                       unsigned int nr_to_write)
556 {
557 	unsigned int m;
558 	int ret;
559 	int nr_pages;
560 	int err2;
561 	struct hib_bio_batch hb;
562 	ktime_t start;
563 	ktime_t stop;
564 
565 	hib_init_batch(&hb);
566 
567 	pr_info("Saving image data pages (%u pages)...\n",
568 		nr_to_write);
569 	m = nr_to_write / 10;
570 	if (!m)
571 		m = 1;
572 	nr_pages = 0;
573 	start = ktime_get();
574 	while (1) {
575 		ret = snapshot_read_next(snapshot);
576 		if (ret <= 0)
577 			break;
578 		trace_android_vh_encrypt_page(data_of(*snapshot));
579 		ret = swap_write_page(handle, data_of(*snapshot), &hb);
580 		if (ret)
581 			break;
582 		if (!(nr_pages % m))
583 			pr_info("Image saving progress: %3d%%\n",
584 				nr_pages / m * 10);
585 		nr_pages++;
586 	}
587 	err2 = hib_wait_io(&hb);
588 	hib_finish_batch(&hb);
589 	stop = ktime_get();
590 	if (!ret)
591 		ret = err2;
592 	if (!ret)
593 		pr_info("Image saving done\n");
594 	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
595 	return ret;
596 }
597 
598 /*
599  * Structure used for CRC32.
600  */
601 struct crc_data {
602 	struct task_struct *thr;                  /* thread */
603 	atomic_t ready;                           /* ready to start flag */
604 	atomic_t stop;                            /* ready to stop flag */
605 	unsigned run_threads;                     /* nr current threads */
606 	wait_queue_head_t go;                     /* start crc update */
607 	wait_queue_head_t done;                   /* crc update done */
608 	u32 *crc32;                               /* points to handle's crc32 */
609 	size_t *unc_len[CMP_THREADS];             /* uncompressed lengths */
610 	unsigned char *unc[CMP_THREADS];          /* uncompressed data */
611 };
612 
613 /*
614  * CRC32 update function that runs in its own thread.
615  */
616 static int crc32_threadfn(void *data)
617 {
618 	struct crc_data *d = data;
619 	unsigned i;
620 
621 	while (1) {
622 		wait_event(d->go, atomic_read_acquire(&d->ready) ||
623 		                  kthread_should_stop());
624 		if (kthread_should_stop()) {
625 			d->thr = NULL;
626 			atomic_set_release(&d->stop, 1);
627 			wake_up(&d->done);
628 			break;
629 		}
630 		atomic_set(&d->ready, 0);
631 
632 		for (i = 0; i < d->run_threads; i++)
633 			*d->crc32 = crc32_le(*d->crc32,
634 			                     d->unc[i], *d->unc_len[i]);
635 		atomic_set_release(&d->stop, 1);
636 		wake_up(&d->done);
637 	}
638 	return 0;
639 }
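/*
 * The ready/stop flags form a simple handshake: the producer fills
 * d->unc[]/d->unc_len[], sets d->ready with release semantics and wakes
 * d->go; this thread observes d->ready with acquire semantics, updates the
 * CRC, sets d->stop with release semantics and wakes d->done; the consumer
 * waits on d->done and clears d->stop. The compression and decompression
 * threads below use the same protocol.
 */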
640 /*
641  * Structure used for data compression.
642  */
643 struct cmp_data {
644 	struct task_struct *thr;                  /* thread */
645 	struct crypto_comp *cc;                   /* crypto compressor stream */
646 	atomic_t ready;                           /* ready to start flag */
647 	atomic_t stop;                            /* ready to stop flag */
648 	int ret;                                  /* return code */
649 	wait_queue_head_t go;                     /* start compression */
650 	wait_queue_head_t done;                   /* compression done */
651 	size_t unc_len;                           /* uncompressed length */
652 	size_t cmp_len;                           /* compressed length */
653 	unsigned char unc[UNC_SIZE];              /* uncompressed buffer */
654 	unsigned char cmp[CMP_SIZE];              /* compressed buffer */
655 };
656 
657 /* Indicates the image size after compression */
658 static atomic_t compressed_size = ATOMIC_INIT(0);
659 
660 /*
661  * Compression function that runs in its own thread.
662  */
663 static int compress_threadfn(void *data)
664 {
665 	struct cmp_data *d = data;
666 	unsigned int cmp_len = 0;
667 
668 	while (1) {
669 		wait_event(d->go, atomic_read_acquire(&d->ready) ||
670 		                  kthread_should_stop());
671 		if (kthread_should_stop()) {
672 			d->thr = NULL;
673 			d->ret = -1;
674 			atomic_set_release(&d->stop, 1);
675 			wake_up(&d->done);
676 			break;
677 		}
678 		atomic_set(&d->ready, 0);
679 
680 		cmp_len = CMP_SIZE - CMP_HEADER;
681 		d->ret = crypto_comp_compress(d->cc, d->unc, d->unc_len,
682 					      d->cmp + CMP_HEADER,
683 					      &cmp_len);
684 		d->cmp_len = cmp_len;
685 
686 		atomic_set(&compressed_size, atomic_read(&compressed_size) + d->cmp_len);
687 		atomic_set_release(&d->stop, 1);
688 		wake_up(&d->done);
689 	}
690 	return 0;
691 }
692 
693 /**
694  * save_compressed_image - Save the suspend image data after compression.
695  * @handle: Swap map handle to use for saving the image.
696  * @snapshot: Image to read data from.
697  * @nr_to_write: Number of pages to save.
698  */
699 static int save_compressed_image(struct swap_map_handle *handle,
700 				 struct snapshot_handle *snapshot,
701 				 unsigned int nr_to_write)
702 {
703 	unsigned int m;
704 	int ret = 0;
705 	int nr_pages;
706 	int err2;
707 	struct hib_bio_batch hb;
708 	ktime_t start;
709 	ktime_t stop;
710 	size_t off;
711 	unsigned thr, run_threads, nr_threads;
712 	unsigned char *page = NULL;
713 	struct cmp_data *data = NULL;
714 	struct crc_data *crc = NULL;
715 
716 	hib_init_batch(&hb);
717 
718 	atomic_set(&compressed_size, 0);
719 
720 	/*
721 	 * We'll limit the number of threads for compression to limit memory
722 	 * footprint.
723 	 */
724 	nr_threads = num_online_cpus() - 1;
725 	nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
726 
727 	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
728 	if (!page) {
729 		pr_err("Failed to allocate %s page\n", hib_comp_algo);
730 		ret = -ENOMEM;
731 		goto out_clean;
732 	}
733 
734 	data = vzalloc(array_size(nr_threads, sizeof(*data)));
735 	if (!data) {
736 		pr_err("Failed to allocate %s data\n", hib_comp_algo);
737 		ret = -ENOMEM;
738 		goto out_clean;
739 	}
740 
741 	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
742 	if (!crc) {
743 		pr_err("Failed to allocate crc\n");
744 		ret = -ENOMEM;
745 		goto out_clean;
746 	}
747 
748 	/*
749 	 * Start the compression threads.
750 	 */
751 	for (thr = 0; thr < nr_threads; thr++) {
752 		init_waitqueue_head(&data[thr].go);
753 		init_waitqueue_head(&data[thr].done);
754 
755 		data[thr].cc = crypto_alloc_comp(hib_comp_algo, 0, 0);
756 		if (IS_ERR_OR_NULL(data[thr].cc)) {
757 			pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc));
758 			ret = -EFAULT;
759 			goto out_clean;
760 		}
761 
762 		data[thr].thr = kthread_run(compress_threadfn,
763 		                            &data[thr],
764 		                            "image_compress/%u", thr);
765 		if (IS_ERR(data[thr].thr)) {
766 			data[thr].thr = NULL;
767 			pr_err("Cannot start compression threads\n");
768 			ret = -ENOMEM;
769 			goto out_clean;
770 		}
771 	}
772 
773 	/*
774 	 * Start the CRC32 thread.
775 	 */
776 	init_waitqueue_head(&crc->go);
777 	init_waitqueue_head(&crc->done);
778 
779 	handle->crc32 = 0;
780 	crc->crc32 = &handle->crc32;
781 	for (thr = 0; thr < nr_threads; thr++) {
782 		crc->unc[thr] = data[thr].unc;
783 		crc->unc_len[thr] = &data[thr].unc_len;
784 	}
785 
786 	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
787 	if (IS_ERR(crc->thr)) {
788 		crc->thr = NULL;
789 		pr_err("Cannot start CRC32 thread\n");
790 		ret = -ENOMEM;
791 		goto out_clean;
792 	}
793 
794 	/*
795 	 * Adjust the number of required free pages after all allocations have
796 	 * been done. We don't want to run out of pages when writing.
797 	 */
798 	handle->reqd_free_pages = reqd_free_pages();
799 
800 	pr_info("Using %u thread(s) for %s compression\n", nr_threads, hib_comp_algo);
801 	pr_info("Compressing and saving image data (%u pages)...\n",
802 		nr_to_write);
803 	m = nr_to_write / 10;
804 	if (!m)
805 		m = 1;
806 	nr_pages = 0;
807 	start = ktime_get();
808 	for (;;) {
809 		for (thr = 0; thr < nr_threads; thr++) {
810 			for (off = 0; off < UNC_SIZE; off += PAGE_SIZE) {
811 				ret = snapshot_read_next(snapshot);
812 				if (ret < 0)
813 					goto out_finish;
814 
815 				if (!ret)
816 					break;
817 
818 				memcpy(data[thr].unc + off,
819 				       data_of(*snapshot), PAGE_SIZE);
820 
821 				if (!(nr_pages % m))
822 					pr_info("Image saving progress: %3d%%\n",
823 						nr_pages / m * 10);
824 				nr_pages++;
825 			}
826 			if (!off)
827 				break;
828 
829 			data[thr].unc_len = off;
830 
831 			atomic_set_release(&data[thr].ready, 1);
832 			wake_up(&data[thr].go);
833 		}
834 
835 		if (!thr)
836 			break;
837 
838 		crc->run_threads = thr;
839 		atomic_set_release(&crc->ready, 1);
840 		wake_up(&crc->go);
841 
842 		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
843 			wait_event(data[thr].done,
844 				atomic_read_acquire(&data[thr].stop));
845 			atomic_set(&data[thr].stop, 0);
846 
847 			ret = data[thr].ret;
848 
849 			if (ret < 0) {
850 				pr_err("%s compression failed\n", hib_comp_algo);
851 				goto out_finish;
852 			}
853 
854 			if (unlikely(!data[thr].cmp_len ||
855 			             data[thr].cmp_len >
856 				     bytes_worst_compress(data[thr].unc_len))) {
857 				pr_err("Invalid %s compressed length\n", hib_comp_algo);
858 				ret = -1;
859 				goto out_finish;
860 			}
861 
862 			*(size_t *)data[thr].cmp = data[thr].cmp_len;
863 
864 			/*
865 			 * Given we are writing one page at a time to disk, we
866 			 * copy that much from the buffer, although the last
867 			 * bit will likely be smaller than a full page. This is
868 			 * OK - we saved the length of the compressed data, so
869 			 * any garbage at the end will be discarded when we
870 			 * read it.
871 			 */
872 			for (off = 0;
873 			     off < CMP_HEADER + data[thr].cmp_len;
874 			     off += PAGE_SIZE) {
875 				memcpy(page, data[thr].cmp + off, PAGE_SIZE);
876 
877 				trace_android_vh_encrypt_page(page);
878 				ret = swap_write_page(handle, page, &hb);
879 				if (ret)
880 					goto out_finish;
881 			}
882 			trace_android_vh_hibernate_save_cmp_len(data[thr].cmp_len + CMP_HEADER);
883 		}
884 
885 		wait_event(crc->done, atomic_read_acquire(&crc->stop));
886 		atomic_set(&crc->stop, 0);
887 	}
888 
889 out_finish:
890 	err2 = hib_wait_io(&hb);
891 	stop = ktime_get();
892 	if (!ret)
893 		ret = err2;
894 	if (!ret)
895 		pr_info("Image saving done\n");
896 	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
897 	pr_info("Image size after compression: %d kbytes\n",
898 		(atomic_read(&compressed_size) / 1024));
899 
900 out_clean:
901 	hib_finish_batch(&hb);
902 	if (crc) {
903 		if (crc->thr)
904 			kthread_stop(crc->thr);
905 		kfree(crc);
906 	}
907 	if (data) {
908 		for (thr = 0; thr < nr_threads; thr++) {
909 			if (data[thr].thr)
910 				kthread_stop(data[thr].thr);
911 			if (data[thr].cc)
912 				crypto_free_comp(data[thr].cc);
913 		}
914 		vfree(data);
915 	}
916 	if (page) free_page((unsigned long)page);
917 
918 	return ret;
919 }
920 
921 /**
922  *	enough_swap - Make sure we have enough swap to save the image.
923  *
924  *	Returns TRUE or FALSE after checking the total amount of swap
925  *	space available from the resume partition.
926  */
927 
928 static int enough_swap(unsigned int nr_pages)
929 {
930 	unsigned int free_swap = count_swap_pages(root_swap, 1);
931 	unsigned int required;
932 
933 	pr_debug("Free swap pages: %u\n", free_swap);
934 
935 	required = PAGES_FOR_IO + nr_pages;
936 	return free_swap > required;
937 }
938 
939 /**
940  *	swsusp_write - Write entire image and metadata.
941  *	@flags: flags to pass to the "boot" kernel in the image header
942  *
943  *	It is important _NOT_ to umount filesystems at this point. We want
944  *	them synced (in case something goes wrong) but we do NOT want to mark
945  *	the filesystem clean: it is not. (And it does not matter; if we resume
946  *	correctly, we'll mark the system clean anyway.)
947  */
948 
949 int swsusp_write(unsigned int flags)
950 {
951 	struct swap_map_handle handle;
952 	struct snapshot_handle snapshot;
953 	struct swsusp_info *header;
954 	unsigned long pages;
955 	int error = 0;
956 
957 	pages = snapshot_get_image_size();
958 
959 	/*
960 	 * The memory allocated by this vendor hook is later freed as part of
961 	 * PM_POST_HIBERNATION notifier call.
962 	 */
963 	trace_android_vh_hibernated_do_mem_alloc(pages, flags, &error);
964 	if (error < 0) {
965 		pr_err("Failed to allocate required memory\n");
966 		return error;
967 	}
968 
969 	error = get_swap_writer(&handle);
970 	if (error) {
971 		pr_err("Cannot get swap writer\n");
972 		return error;
973 	}
974 	trace_android_vh_init_aes_encrypt(NULL);
975 	if (flags & SF_NOCOMPRESS_MODE) {
976 		if (!enough_swap(pages)) {
977 			pr_err("Not enough free swap\n");
978 			error = -ENOSPC;
979 			goto out_finish;
980 		}
981 	}
982 	memset(&snapshot, 0, sizeof(struct snapshot_handle));
983 	error = snapshot_read_next(&snapshot);
984 	if (error < (int)PAGE_SIZE) {
985 		if (error >= 0)
986 			error = -EFAULT;
987 
988 		goto out_finish;
989 	}
990 	header = (struct swsusp_info *)data_of(snapshot);
991 	error = swap_write_page(&handle, header, NULL);
992 	if (!error) {
993 		error = (flags & SF_NOCOMPRESS_MODE) ?
994 			save_image(&handle, &snapshot, pages - 1) :
995 			save_compressed_image(&handle, &snapshot, pages - 1);
996 		if (!error)
997 			trace_android_vh_post_image_save(root_swap);
998 	}
999 out_finish:
1000 	error = swap_writer_finish(&handle, flags, error);
1001 	return error;
1002 }
1003 
1004 /*
1005  *	The following functions allow us to read data using a swap map
1006  *	in a file-like way.
1007  */
1008 
1009 static void release_swap_reader(struct swap_map_handle *handle)
1010 {
1011 	struct swap_map_page_list *tmp;
1012 
1013 	while (handle->maps) {
1014 		if (handle->maps->map)
1015 			free_page((unsigned long)handle->maps->map);
1016 		tmp = handle->maps;
1017 		handle->maps = handle->maps->next;
1018 		kfree(tmp);
1019 	}
1020 	handle->cur = NULL;
1021 }
1022 
1023 static int get_swap_reader(struct swap_map_handle *handle,
1024 		unsigned int *flags_p)
1025 {
1026 	int error;
1027 	struct swap_map_page_list *tmp, *last;
1028 	sector_t offset;
1029 
1030 	*flags_p = swsusp_header->flags;
1031 
1032 	if (!swsusp_header->image) /* how can this happen? */
1033 		return -EINVAL;
1034 
1035 	handle->cur = NULL;
1036 	last = handle->maps = NULL;
1037 	offset = swsusp_header->image;
1038 	while (offset) {
1039 		tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
1040 		if (!tmp) {
1041 			release_swap_reader(handle);
1042 			return -ENOMEM;
1043 		}
1044 		if (!handle->maps)
1045 			handle->maps = tmp;
1046 		if (last)
1047 			last->next = tmp;
1048 		last = tmp;
1049 
1050 		tmp->map = (struct swap_map_page *)
1051 			   __get_free_page(GFP_NOIO | __GFP_HIGH);
1052 		if (!tmp->map) {
1053 			release_swap_reader(handle);
1054 			return -ENOMEM;
1055 		}
1056 
1057 		error = hib_submit_io(REQ_OP_READ, offset, tmp->map, NULL);
1058 		if (error) {
1059 			release_swap_reader(handle);
1060 			return error;
1061 		}
1062 		offset = tmp->map->next_swap;
1063 	}
1064 	handle->k = 0;
1065 	handle->cur = handle->maps->map;
1066 	return 0;
1067 }
1068 
1069 static int swap_read_page(struct swap_map_handle *handle, void *buf,
1070 		struct hib_bio_batch *hb)
1071 {
1072 	sector_t offset;
1073 	int error;
1074 	struct swap_map_page_list *tmp;
1075 
1076 	if (!handle->cur)
1077 		return -EINVAL;
1078 	offset = handle->cur->entries[handle->k];
1079 	if (!offset)
1080 		return -EFAULT;
1081 	error = hib_submit_io(REQ_OP_READ, offset, buf, hb);
1082 	if (error)
1083 		return error;
1084 	if (++handle->k >= MAP_PAGE_ENTRIES) {
1085 		handle->k = 0;
1086 		free_page((unsigned long)handle->maps->map);
1087 		tmp = handle->maps;
1088 		handle->maps = handle->maps->next;
1089 		kfree(tmp);
1090 		if (!handle->maps)
1091 			release_swap_reader(handle);
1092 		else
1093 			handle->cur = handle->maps->map;
1094 	}
1095 	return error;
1096 }
1097 
1098 static int swap_reader_finish(struct swap_map_handle *handle)
1099 {
1100 	release_swap_reader(handle);
1101 
1102 	return 0;
1103 }
1104 
1105 /**
1106  *	load_image - load the image using the swap map handle
1107  *	@handle and the snapshot handle @snapshot
1108  *	(assume there are @nr_to_read pages to load)
1109  */
1110 
1111 static int load_image(struct swap_map_handle *handle,
1112                       struct snapshot_handle *snapshot,
1113                       unsigned int nr_to_read)
1114 {
1115 	unsigned int m;
1116 	int ret = 0;
1117 	ktime_t start;
1118 	ktime_t stop;
1119 	struct hib_bio_batch hb;
1120 	int err2;
1121 	unsigned nr_pages;
1122 
1123 	hib_init_batch(&hb);
1124 
1125 	clean_pages_on_read = true;
1126 	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
1127 	m = nr_to_read / 10;
1128 	if (!m)
1129 		m = 1;
1130 	nr_pages = 0;
1131 	start = ktime_get();
1132 	for ( ; ; ) {
1133 		ret = snapshot_write_next(snapshot);
1134 		if (ret <= 0)
1135 			break;
1136 		ret = swap_read_page(handle, data_of(*snapshot), &hb);
1137 		if (ret)
1138 			break;
1139 		if (snapshot->sync_read)
1140 			ret = hib_wait_io(&hb);
1141 		if (ret)
1142 			break;
1143 		if (!(nr_pages % m))
1144 			pr_info("Image loading progress: %3d%%\n",
1145 				nr_pages / m * 10);
1146 		nr_pages++;
1147 	}
1148 	err2 = hib_wait_io(&hb);
1149 	hib_finish_batch(&hb);
1150 	stop = ktime_get();
1151 	if (!ret)
1152 		ret = err2;
1153 	if (!ret) {
1154 		pr_info("Image loading done\n");
1155 		ret = snapshot_write_finalize(snapshot);
1156 		if (!ret && !snapshot_image_loaded(snapshot))
1157 			ret = -ENODATA;
1158 	}
1159 	swsusp_show_speed(start, stop, nr_to_read, "Read");
1160 	return ret;
1161 }
1162 
1163 /*
1164  * Structure used for data decompression.
1165  */
1166 struct dec_data {
1167 	struct task_struct *thr;                  /* thread */
1168 	struct crypto_comp *cc;                   /* crypto compressor stream */
1169 	atomic_t ready;                           /* ready to start flag */
1170 	atomic_t stop;                            /* ready to stop flag */
1171 	int ret;                                  /* return code */
1172 	wait_queue_head_t go;                     /* start decompression */
1173 	wait_queue_head_t done;                   /* decompression done */
1174 	size_t unc_len;                           /* uncompressed length */
1175 	size_t cmp_len;                           /* compressed length */
1176 	unsigned char unc[UNC_SIZE];              /* uncompressed buffer */
1177 	unsigned char cmp[CMP_SIZE];              /* compressed buffer */
1178 };
1179 
1180 /*
1181  * Decompression function that runs in its own thread.
1182  */
1183 static int decompress_threadfn(void *data)
1184 {
1185 	struct dec_data *d = data;
1186 	unsigned int unc_len = 0;
1187 
1188 	while (1) {
1189 		wait_event(d->go, atomic_read_acquire(&d->ready) ||
1190 		                  kthread_should_stop());
1191 		if (kthread_should_stop()) {
1192 			d->thr = NULL;
1193 			d->ret = -1;
1194 			atomic_set_release(&d->stop, 1);
1195 			wake_up(&d->done);
1196 			break;
1197 		}
1198 		atomic_set(&d->ready, 0);
1199 
1200 		unc_len = UNC_SIZE;
1201 		d->ret = crypto_comp_decompress(d->cc, d->cmp + CMP_HEADER, d->cmp_len,
1202 						d->unc, &unc_len);
1203 		d->unc_len = unc_len;
1204 
1205 		if (clean_pages_on_decompress)
1206 			flush_icache_range((unsigned long)d->unc,
1207 					   (unsigned long)d->unc + d->unc_len);
1208 
1209 		atomic_set_release(&d->stop, 1);
1210 		wake_up(&d->done);
1211 	}
1212 	return 0;
1213 }
1214 
1215 /**
1216  * load_compressed_image - Load compressed image data and decompress it.
1217  * @handle: Swap map handle to use for loading data.
1218  * @snapshot: Image to copy uncompressed data into.
1219  * @nr_to_read: Number of pages to load.
1220  */
1221 static int load_compressed_image(struct swap_map_handle *handle,
1222 				 struct snapshot_handle *snapshot,
1223 				 unsigned int nr_to_read)
1224 {
1225 	unsigned int m;
1226 	int ret = 0;
1227 	int eof = 0;
1228 	struct hib_bio_batch hb;
1229 	ktime_t start;
1230 	ktime_t stop;
1231 	unsigned nr_pages;
1232 	size_t off;
1233 	unsigned i, thr, run_threads, nr_threads;
1234 	unsigned ring = 0, pg = 0, ring_size = 0,
1235 	         have = 0, want, need, asked = 0;
1236 	unsigned long read_pages = 0;
1237 	unsigned char **page = NULL;
1238 	struct dec_data *data = NULL;
1239 	struct crc_data *crc = NULL;
1240 
1241 	hib_init_batch(&hb);
1242 
1243 	/*
1244 	 * We'll limit the number of threads for decompression to limit memory
1245 	 * footprint.
1246 	 */
1247 	nr_threads = num_online_cpus() - 1;
1248 	nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
1249 
1250 	page = vmalloc(array_size(CMP_MAX_RD_PAGES, sizeof(*page)));
1251 	if (!page) {
1252 		pr_err("Failed to allocate %s page\n", hib_comp_algo);
1253 		ret = -ENOMEM;
1254 		goto out_clean;
1255 	}
1256 
1257 	data = vzalloc(array_size(nr_threads, sizeof(*data)));
1258 	if (!data) {
1259 		pr_err("Failed to allocate %s data\n", hib_comp_algo);
1260 		ret = -ENOMEM;
1261 		goto out_clean;
1262 	}
1263 
1264 	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
1265 	if (!crc) {
1266 		pr_err("Failed to allocate crc\n");
1267 		ret = -ENOMEM;
1268 		goto out_clean;
1269 	}
1270 
1271 	clean_pages_on_decompress = true;
1272 
1273 	/*
1274 	 * Start the decompression threads.
1275 	 */
1276 	for (thr = 0; thr < nr_threads; thr++) {
1277 		init_waitqueue_head(&data[thr].go);
1278 		init_waitqueue_head(&data[thr].done);
1279 
1280 		data[thr].cc = crypto_alloc_comp(hib_comp_algo, 0, 0);
1281 		if (IS_ERR_OR_NULL(data[thr].cc)) {
1282 			pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc));
1283 			ret = -EFAULT;
1284 			goto out_clean;
1285 		}
1286 
1287 		data[thr].thr = kthread_run(decompress_threadfn,
1288 		                            &data[thr],
1289 		                            "image_decompress/%u", thr);
1290 		if (IS_ERR(data[thr].thr)) {
1291 			data[thr].thr = NULL;
1292 			pr_err("Cannot start decompression threads\n");
1293 			ret = -ENOMEM;
1294 			goto out_clean;
1295 		}
1296 	}
1297 
1298 	/*
1299 	 * Start the CRC32 thread.
1300 	 */
1301 	init_waitqueue_head(&crc->go);
1302 	init_waitqueue_head(&crc->done);
1303 
1304 	handle->crc32 = 0;
1305 	crc->crc32 = &handle->crc32;
1306 	for (thr = 0; thr < nr_threads; thr++) {
1307 		crc->unc[thr] = data[thr].unc;
1308 		crc->unc_len[thr] = &data[thr].unc_len;
1309 	}
1310 
1311 	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
1312 	if (IS_ERR(crc->thr)) {
1313 		crc->thr = NULL;
1314 		pr_err("Cannot start CRC32 thread\n");
1315 		ret = -ENOMEM;
1316 		goto out_clean;
1317 	}
1318 
1319 	/*
1320 	 * Set the number of pages for read buffering.
1321 	 * This is complete guesswork, because we'll only know the real
1322 	 * picture once prepare_image() is called, which is much later on
1323 	 * during the image load phase. We'll assume the worst case and
1324 	 * say that none of the image pages are from high memory.
1325 	 */
1326 	if (low_free_pages() > snapshot_get_image_size())
1327 		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
1328 	read_pages = clamp_val(read_pages, CMP_MIN_RD_PAGES, CMP_MAX_RD_PAGES);
1329 
1330 	for (i = 0; i < read_pages; i++) {
1331 		page[i] = (void *)__get_free_page(i < CMP_PAGES ?
1332 						  GFP_NOIO | __GFP_HIGH :
1333 						  GFP_NOIO | __GFP_NOWARN |
1334 						  __GFP_NORETRY);
1335 
1336 		if (!page[i]) {
1337 			if (i < CMP_PAGES) {
1338 				ring_size = i;
1339 				pr_err("Failed to allocate %s pages\n", hib_comp_algo);
1340 				ret = -ENOMEM;
1341 				goto out_clean;
1342 			} else {
1343 				break;
1344 			}
1345 		}
1346 	}
1347 	want = ring_size = i;
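	/*
	 * Ring accounting for the read loop below: "want" counts ring pages
	 * that may still have reads submitted into them, "asked" counts reads
	 * submitted but not yet waited for, and "have" counts pages whose
	 * data has arrived and can be consumed; "ring" is the producer index
	 * and "pg" the consumer index into page[].
	 */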
1348 
1349 	pr_info("Using %u thread(s) for %s decompression\n", nr_threads, hib_comp_algo);
1350 	pr_info("Loading and decompressing image data (%u pages)...\n",
1351 		nr_to_read);
1352 	m = nr_to_read / 10;
1353 	if (!m)
1354 		m = 1;
1355 	nr_pages = 0;
1356 	start = ktime_get();
1357 
1358 	ret = snapshot_write_next(snapshot);
1359 	if (ret <= 0)
1360 		goto out_finish;
1361 
1362 	for (;;) {
1363 		for (i = 0; !eof && i < want; i++) {
1364 			ret = swap_read_page(handle, page[ring], &hb);
1365 			if (ret) {
1366 				/*
1367 				 * On real read error, finish. On end of data,
1368 				 * set EOF flag and just exit the read loop.
1369 				 */
1370 				if (handle->cur &&
1371 				    handle->cur->entries[handle->k]) {
1372 					goto out_finish;
1373 				} else {
1374 					eof = 1;
1375 					break;
1376 				}
1377 			}
1378 			if (++ring >= ring_size)
1379 				ring = 0;
1380 		}
1381 		asked += i;
1382 		want -= i;
1383 
1384 		/*
1385 		 * We are out of data, wait for some more.
1386 		 */
1387 		if (!have) {
1388 			if (!asked)
1389 				break;
1390 
1391 			ret = hib_wait_io(&hb);
1392 			if (ret)
1393 				goto out_finish;
1394 			have += asked;
1395 			asked = 0;
1396 			if (eof)
1397 				eof = 2;
1398 		}
1399 
1400 		if (crc->run_threads) {
1401 			wait_event(crc->done, atomic_read_acquire(&crc->stop));
1402 			atomic_set(&crc->stop, 0);
1403 			crc->run_threads = 0;
1404 		}
1405 
1406 		for (thr = 0; have && thr < nr_threads; thr++) {
1407 			data[thr].cmp_len = *(size_t *)page[pg];
1408 			if (unlikely(!data[thr].cmp_len ||
1409 			             data[thr].cmp_len >
1410 					bytes_worst_compress(UNC_SIZE))) {
1411 				pr_err("Invalid %s compressed length\n", hib_comp_algo);
1412 				ret = -1;
1413 				goto out_finish;
1414 			}
1415 
1416 			need = DIV_ROUND_UP(data[thr].cmp_len + CMP_HEADER,
1417 			                    PAGE_SIZE);
1418 			if (need > have) {
1419 				if (eof > 1) {
1420 					ret = -1;
1421 					goto out_finish;
1422 				}
1423 				break;
1424 			}
1425 
1426 			for (off = 0;
1427 			     off < CMP_HEADER + data[thr].cmp_len;
1428 			     off += PAGE_SIZE) {
1429 				memcpy(data[thr].cmp + off,
1430 				       page[pg], PAGE_SIZE);
1431 				have--;
1432 				want++;
1433 				if (++pg >= ring_size)
1434 					pg = 0;
1435 			}
1436 
1437 			atomic_set_release(&data[thr].ready, 1);
1438 			wake_up(&data[thr].go);
1439 		}
1440 
1441 		/*
1442 		 * Wait for more data while we are decompressing.
1443 		 */
1444 		if (have < CMP_PAGES && asked) {
1445 			ret = hib_wait_io(&hb);
1446 			if (ret)
1447 				goto out_finish;
1448 			have += asked;
1449 			asked = 0;
1450 			if (eof)
1451 				eof = 2;
1452 		}
1453 
1454 		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
1455 			wait_event(data[thr].done,
1456 				atomic_read_acquire(&data[thr].stop));
1457 			atomic_set(&data[thr].stop, 0);
1458 
1459 			ret = data[thr].ret;
1460 
1461 			if (ret < 0) {
1462 				pr_err("%s decompression failed\n", hib_comp_algo);
1463 				goto out_finish;
1464 			}
1465 
1466 			if (unlikely(!data[thr].unc_len ||
1467 				data[thr].unc_len > UNC_SIZE ||
1468 				data[thr].unc_len & (PAGE_SIZE - 1))) {
1469 				pr_err("Invalid %s uncompressed length\n", hib_comp_algo);
1470 				ret = -1;
1471 				goto out_finish;
1472 			}
1473 
1474 			for (off = 0;
1475 			     off < data[thr].unc_len; off += PAGE_SIZE) {
1476 				memcpy(data_of(*snapshot),
1477 				       data[thr].unc + off, PAGE_SIZE);
1478 
1479 				if (!(nr_pages % m))
1480 					pr_info("Image loading progress: %3d%%\n",
1481 						nr_pages / m * 10);
1482 				nr_pages++;
1483 
1484 				ret = snapshot_write_next(snapshot);
1485 				if (ret <= 0) {
1486 					crc->run_threads = thr + 1;
1487 					atomic_set_release(&crc->ready, 1);
1488 					wake_up(&crc->go);
1489 					goto out_finish;
1490 				}
1491 			}
1492 		}
1493 
1494 		crc->run_threads = thr;
1495 		atomic_set_release(&crc->ready, 1);
1496 		wake_up(&crc->go);
1497 	}
1498 
1499 out_finish:
1500 	if (crc->run_threads) {
1501 		wait_event(crc->done, atomic_read_acquire(&crc->stop));
1502 		atomic_set(&crc->stop, 0);
1503 	}
1504 	stop = ktime_get();
1505 	if (!ret) {
1506 		pr_info("Image loading done\n");
1507 		ret = snapshot_write_finalize(snapshot);
1508 		if (!ret && !snapshot_image_loaded(snapshot))
1509 			ret = -ENODATA;
1510 		if (!ret) {
1511 			if (swsusp_header->flags & SF_CRC32_MODE) {
1512 				if (handle->crc32 != swsusp_header->crc32) {
1513 					pr_err("Invalid image CRC32!\n");
1514 					ret = -ENODATA;
1515 				}
1516 			}
1517 		}
1518 	}
1519 	swsusp_show_speed(start, stop, nr_to_read, "Read");
1520 out_clean:
1521 	hib_finish_batch(&hb);
1522 	for (i = 0; i < ring_size; i++)
1523 		free_page((unsigned long)page[i]);
1524 	if (crc) {
1525 		if (crc->thr)
1526 			kthread_stop(crc->thr);
1527 		kfree(crc);
1528 	}
1529 	if (data) {
1530 		for (thr = 0; thr < nr_threads; thr++) {
1531 			if (data[thr].thr)
1532 				kthread_stop(data[thr].thr);
1533 			if (data[thr].cc)
1534 				crypto_free_comp(data[thr].cc);
1535 		}
1536 		vfree(data);
1537 	}
1538 	vfree(page);
1539 
1540 	return ret;
1541 }
1542 
1543 /**
1544  *	swsusp_read - read the hibernation image.
1545  *	@flags_p: flags passed by the "frozen" kernel in the image header should
1546  *		  be written into this memory location
1547  */
1548 
1549 int swsusp_read(unsigned int *flags_p)
1550 {
1551 	int error;
1552 	struct swap_map_handle handle;
1553 	struct snapshot_handle snapshot;
1554 	struct swsusp_info *header;
1555 
1556 	memset(&snapshot, 0, sizeof(struct snapshot_handle));
1557 	error = snapshot_write_next(&snapshot);
1558 	if (error < (int)PAGE_SIZE)
1559 		return error < 0 ? error : -EFAULT;
1560 	header = (struct swsusp_info *)data_of(snapshot);
1561 	error = get_swap_reader(&handle, flags_p);
1562 	if (error)
1563 		goto end;
1564 	if (!error)
1565 		error = swap_read_page(&handle, header, NULL);
1566 	if (!error) {
1567 		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
1568 			load_image(&handle, &snapshot, header->pages - 1) :
1569 			load_compressed_image(&handle, &snapshot, header->pages - 1);
1570 	}
1571 	swap_reader_finish(&handle);
1572 end:
1573 	if (!error)
1574 		pr_debug("Image successfully loaded\n");
1575 	else
1576 		pr_debug("Error %d resuming\n", error);
1577 	return error;
1578 }
1579 
1580 static void *swsusp_holder;
1581 
1582 /**
1583  * swsusp_check - Open the resume device and check for the swsusp signature.
1584  * @exclusive: Open the resume device exclusively.
1585  */
1586 
1587 int swsusp_check(bool exclusive)
1588 {
1589 	void *holder = exclusive ? &swsusp_holder : NULL;
1590 	int error;
1591 
1592 	hib_resume_bdev_file = bdev_file_open_by_dev(swsusp_resume_device,
1593 				BLK_OPEN_READ, holder, NULL);
1594 	if (!IS_ERR(hib_resume_bdev_file)) {
1595 		trace_android_vh_save_hib_resume_bdev(hib_resume_bdev_file);
1596 		clear_page(swsusp_header);
1597 		error = hib_submit_io(REQ_OP_READ, swsusp_resume_block,
1598 					swsusp_header, NULL);
1599 		if (error)
1600 			goto put;
1601 
1602 		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
1603 			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
1604 			swsusp_header_flags = swsusp_header->flags;
1605 			/* Reset swap signature now */
1606 			error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
1607 						swsusp_resume_block,
1608 						swsusp_header, NULL);
1609 		} else {
1610 			error = -EINVAL;
1611 		}
1612 		if (!error && swsusp_header->flags & SF_HW_SIG &&
1613 		    swsusp_header->hw_sig != swsusp_hardware_signature) {
1614 			pr_info("Suspend image hardware signature mismatch (%08x now %08x); aborting resume.\n",
1615 				swsusp_header->hw_sig, swsusp_hardware_signature);
1616 			error = -EINVAL;
1617 		}
1618 
1619 put:
1620 		if (error)
1621 			bdev_fput(hib_resume_bdev_file);
1622 		else
1623 			pr_debug("Image signature found, resuming\n");
1624 	} else {
1625 		error = PTR_ERR(hib_resume_bdev_file);
1626 	}
1627 
1628 	if (error)
1629 		pr_debug("Image not found (code %d)\n", error);
1630 
1631 	return error;
1632 }
1633 
1634 /**
1635  * swsusp_close - close resume device.
1636  */
1637 
1638 void swsusp_close(void)
1639 {
1640 	if (IS_ERR(hib_resume_bdev_file)) {
1641 		pr_debug("Image device not initialised\n");
1642 		return;
1643 	}
1644 
1645 	fput(hib_resume_bdev_file);
1646 }
1647 
1648 /**
1649  *      swsusp_unmark - Unmark swsusp signature in the resume device
1650  */
1651 
1652 #ifdef CONFIG_SUSPEND
1653 int swsusp_unmark(void)
1654 {
1655 	int error;
1656 
1657 	hib_submit_io(REQ_OP_READ, swsusp_resume_block,
1658 			swsusp_header, NULL);
1659 	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
1660 		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
1661 		error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
1662 					swsusp_resume_block,
1663 					swsusp_header, NULL);
1664 	} else {
1665 		pr_err("Cannot find swsusp signature!\n");
1666 		error = -ENODEV;
1667 	}
1668 
1669 	/*
1670 	 * We just returned from suspend, we don't need the image any more.
1671 	 */
1672 	free_all_swap_pages(root_swap);
1673 
1674 	return error;
1675 }
1676 #endif
1677 
1678 static int __init swsusp_header_init(void)
1679 {
1680 	swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
1681 	if (!swsusp_header)
1682 		panic("Could not allocate memory for swsusp_header\n");
1683 	return 0;
1684 }
1685 
1686 core_initcall(swsusp_header_init);
1687