Lines matching refs:page in kernel/kexec_core.c
144 static struct page *kimage_alloc_page(struct kimage *image,
299 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) in kimage_alloc_pages()
301 struct page *pages; in kimage_alloc_pages()
326 static void kimage_free_pages(struct page *page) in kimage_free_pages() argument
330 order = page_private(page); in kimage_free_pages()
333 arch_kexec_pre_free_pages(page_address(page), count); in kimage_free_pages()
336 ClearPageReserved(page + i); in kimage_free_pages()
337 __free_pages(page, order); in kimage_free_pages()
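
The matches at lines 299-337 pair the allocator with its teardown: kimage_alloc_pages() records the allocation order in the page's private field, and kimage_free_pages() (line 330) reads it back, so callers never have to carry the order around themselves. A minimal sketch of that pairing, reconstructed around the matched lines; surrounding details (fatal-signal checks, __GFP_ZERO handling) vary by kernel version:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/kexec.h>

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages = alloc_pages(gfp_mask, order);

	if (pages) {
		unsigned int i, count = 1 << order;

		pages->mapping = NULL;
		set_page_private(pages, order);	/* remembered for the free side */
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
		arch_kexec_post_alloc_pages(page_address(pages), count, gfp_mask);
	}
	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int i, order = page_private(page);	/* line 330 */
	unsigned int count = 1 << order;

	arch_kexec_pre_free_pages(page_address(page), count);	/* line 333 */
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);			/* line 336 */
	__free_pages(page, order);				/* line 337 */
}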
342 struct page *page, *next; in kimage_free_page_list() local
344 list_for_each_entry_safe(page, next, list, lru) { in kimage_free_page_list()
345 list_del(&page->lru); in kimage_free_page_list()
346 kimage_free_pages(page); in kimage_free_page_list()
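
Lines 342-346 give the full shape of kimage_free_page_list(): it drains a list of pages chained through page->lru. The _safe iterator is required because each pass unlinks the entry the loop cursor currently points at. Reconstructed from the matched lines:

#include <linux/list.h>
#include <linux/mm.h>

static void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);		/* line 345 */
		kimage_free_pages(page);	/* line 346: order comes from page_private */
	}
}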
350 static struct page *kimage_alloc_normal_control_pages(struct kimage *image, in kimage_alloc_normal_control_pages()
367 struct page *pages; in kimage_alloc_normal_control_pages()
416 static struct page *kimage_alloc_crash_control_pages(struct kimage *image, in kimage_alloc_crash_control_pages()
441 struct page *pages; in kimage_alloc_crash_control_pages()
483 struct page *kimage_alloc_control_pages(struct kimage *image, in kimage_alloc_control_pages()
486 struct page *pages = NULL; in kimage_alloc_control_pages()
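
Lines 350-486 show two control-page allocators plus the public entry point. Normal kexec images take control pages from the buddy allocator while steering clear of the destination ranges; crash images carve them out of the reserved crashkernel region. The entry point plausibly just dispatches on the image type, as in this sketch (KEXEC_TYPE_* constants from <linux/kexec.h>):

#include <linux/kexec.h>

struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;	/* line 486 */

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}
	return pages;
}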
502 struct page *vmcoreinfo_page; in kimage_crash_copy_vmcoreinfo()
541 struct page *page; in kimage_add_entry() local
543 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); in kimage_add_entry()
544 if (!page) in kimage_add_entry()
547 ind_page = page_address(page); in kimage_add_entry()
572 static int kimage_add_page(struct kimage *image, unsigned long page) in kimage_add_page() argument
576 page &= PAGE_MASK; in kimage_add_page()
577 result = kimage_add_entry(image, page | IND_SOURCE); in kimage_add_page()
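
Lines 541-577 record source pages in the kimage entry list. kimage_add_page() masks the address to a page boundary (line 576) and tags it IND_SOURCE before handing it to kimage_add_entry(); when the current indirection page fills up, kimage_add_entry() allocates a fresh one (line 543) and links it in with an IND_INDIRECTION entry. A sketch of the encoding, assuming the IND_* flags and kimage_entry_t from <linux/kexec.h>:

#include <linux/kexec.h>

/*
 * Each kimage_entry_t packs a page-aligned physical address in the
 * high bits and a type tag in the low bits:
 *   IND_DESTINATION - following sources are copied to this address
 *   IND_INDIRECTION - the entry list continues on this page
 *   IND_DONE        - end of the list
 *   IND_SOURCE      - one page of payload to copy
 */

/* From the listing (line 543 region): grows the entry list, chaining
 * indirection pages as needed. */
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry);

static int kimage_add_page(struct kimage *image, unsigned long page)
{
	page &= PAGE_MASK;				   /* line 576 */
	return kimage_add_entry(image, page | IND_SOURCE); /* line 577 */
}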
613 struct page *page; in kimage_free_entry() local
615 page = boot_pfn_to_page(entry >> PAGE_SHIFT); in kimage_free_entry()
616 kimage_free_pages(page); in kimage_free_entry()
666 unsigned long page) in kimage_dst_used() argument
675 if (page == destination) in kimage_dst_used()
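
Lines 613-675 walk that entry list. kimage_free_entry() (line 615) turns an entry back into its struct page and releases it; kimage_dst_used() scans for a source entry whose running destination equals a candidate address, which is how kimage_alloc_page() detects collisions. The walk is plausibly a macro along these lines (helper names follow kexec_core.c conventions):

#include <linux/kexec.h>

/* Stop at IND_DONE; an IND_INDIRECTION entry redirects the cursor to
 * the next indirection page instead of advancing it. */
#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
	     ptr = (entry & IND_INDIRECTION) ? \
		boot_phys_to_virt(entry & PAGE_MASK) : ptr + 1)

static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)	/* line 675 */
				return ptr;
			destination += PAGE_SIZE;
		}
	}
	return NULL;
}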
684 static struct page *kimage_alloc_page(struct kimage *image, in kimage_alloc_page()
706 struct page *page; in kimage_alloc_page() local
713 list_for_each_entry(page, &image->dest_pages, lru) { in kimage_alloc_page()
714 addr = page_to_boot_pfn(page) << PAGE_SHIFT; in kimage_alloc_page()
716 list_del(&page->lru); in kimage_alloc_page()
717 return page; in kimage_alloc_page()
720 page = NULL; in kimage_alloc_page()
725 page = kimage_alloc_pages(gfp_mask, 0); in kimage_alloc_page()
726 if (!page) in kimage_alloc_page()
729 if (page_to_boot_pfn(page) > in kimage_alloc_page()
731 list_add(&page->lru, &image->unusable_pages); in kimage_alloc_page()
734 addr = page_to_boot_pfn(page) << PAGE_SHIFT; in kimage_alloc_page()
754 struct page *old_page; in kimage_alloc_page()
758 copy_highpage(page, old_page); in kimage_alloc_page()
771 page = old_page; in kimage_alloc_page()
775 list_add(&page->lru, &image->dest_pages); in kimage_alloc_page()
778 return page; in kimage_alloc_page()
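
Lines 684-778 are the heart of the allocator. kimage_alloc_page() first tries to recycle a page from the image->dest_pages cache (lines 713-717); failing that it allocates a fresh page (line 725), parks anything above the source memory limit on image->unusable_pages (lines 729-731; the middle line of that comparison, presumably against KEXEC_SOURCE_MEMORY_LIMIT, contains no lowercase "page" and so is absent from this listing), and, if the new page lands on some segment's destination that already has a source page queued, swaps contents with that page so the entry list stays valid. A sketch of that collision path; kimage_swap_if_dst_used is a hypothetical helper name for illustration:

#include <linux/highmem.h>
#include <linux/kexec.h>

/* Hypothetical helper isolating the swap around lines 754-771. */
static struct page *kimage_swap_if_dst_used(struct kimage *image,
					    struct page *page,
					    unsigned long addr)
{
	kimage_entry_t *old = kimage_dst_used(image, addr);

	if (old) {
		unsigned long old_addr = *old & PAGE_MASK;
		struct page *old_page =
			boot_pfn_to_page(old_addr >> PAGE_SHIFT);

		copy_highpage(page, old_page);		/* line 758 */
		*old = addr | (*old & ~PAGE_MASK);	/* retarget the entry */
		return old_page;			/* line 771 */
	}
	return page;
}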
804 struct page *page; in kimage_load_normal_segment() local
808 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); in kimage_load_normal_segment()
809 if (!page) { in kimage_load_normal_segment()
813 result = kimage_add_page(image, page_to_boot_pfn(page) in kimage_load_normal_segment()
818 ptr = kmap(page); in kimage_load_normal_segment()
831 kunmap(page); in kimage_load_normal_segment()
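
Lines 804-831 show the per-page step of the normal-segment loader: allocate a page as close to the target address maddr as the safeguards allow (line 808), record it as an IND_SOURCE entry (line 813), then fill it through a temporary kernel mapping. A sketch of one iteration; kimage_load_one_page is a hypothetical name and the chunk arithmetic is simplified:

#include <linux/highmem.h>
#include <linux/kexec.h>
#include <linux/uaccess.h>

static int kimage_load_one_page(struct kimage *image, unsigned long maddr,
				const void __user *buf, size_t uchunk)
{
	struct page *page;
	char *ptr;
	int result;

	page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);	/* line 808 */
	if (!page)
		return -ENOMEM;

	result = kimage_add_page(image,
				 page_to_boot_pfn(page) << PAGE_SHIFT); /* line 813 */
	if (result < 0)
		return result;

	ptr = kmap(page);					/* line 818 */
	clear_page(ptr);
	if (copy_from_user(ptr + (maddr & ~PAGE_MASK), buf, uchunk))
		result = -EFAULT;
	kunmap(page);						/* line 831 */
	return result;
}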
872 struct page *page; in kimage_load_crash_segment() local
876 page = boot_pfn_to_page(maddr >> PAGE_SHIFT); in kimage_load_crash_segment()
877 if (!page) { in kimage_load_crash_segment()
881 arch_kexec_post_alloc_pages(page_address(page), 1, 0); in kimage_load_crash_segment()
882 ptr = kmap(page); in kimage_load_crash_segment()
897 kexec_flush_icache_page(page); in kimage_load_crash_segment()
898 kunmap(page); in kimage_load_crash_segment()
899 arch_kexec_pre_free_pages(page_address(page), 1); in kimage_load_crash_segment()
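
Lines 872-899 show the crash-segment counterpart. Crash kernels load directly into the reserved crash region, so the target page is derived from the destination address itself (line 876) and no entry list is built; the arch hooks bracketing the copy map and unmap the page, which may be absent from the kernel's linear mapping while the region is protected. A sketch of one iteration, with kimage_load_one_crash_page a hypothetical name and the chunk arithmetic simplified:

#include <linux/highmem.h>
#include <linux/kexec.h>
#include <linux/uaccess.h>

static int kimage_load_one_crash_page(unsigned long maddr,
				      const void __user *buf, size_t uchunk)
{
	struct page *page;
	char *ptr;
	int result = 0;

	page = boot_pfn_to_page(maddr >> PAGE_SHIFT);		/* line 876 */
	if (!page)
		return -ENOMEM;

	arch_kexec_post_alloc_pages(page_address(page), 1, 0);	/* line 881 */
	ptr = kmap(page);					/* line 882 */
	if (copy_from_user(ptr + (maddr & ~PAGE_MASK), buf, uchunk))
		result = -EFAULT;
	kexec_flush_icache_page(page);				/* line 897 */
	kunmap(page);						/* line 898 */
	arch_kexec_pre_free_pages(page_address(page), 1);	/* line 899 */
	return result;
}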