Lines matching refs:page (references to the identifier page in the kernel's kexec code)

113 static struct page *kimage_alloc_page(struct kimage *image,
355 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) in kimage_alloc_pages()
357 struct page *pages; in kimage_alloc_pages()
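
The kimage_alloc_pages() hits above only show the signature and the pages local. A hedged reconstruction of the helper; the mapping/page_private/Reserved bookkeeping is inferred from kimage_free_pages() below, not from the hits themselves:

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *pages;

        pages = alloc_pages(gfp_mask, order);
        if (pages) {
                unsigned int count, i;

                pages->mapping = NULL;
                /* Remember the order so kimage_free_pages() can undo this. */
                set_page_private(pages, order);
                count = 1 << order;
                for (i = 0; i < count; i++)
                        SetPageReserved(pages + i);
        }
        return pages;
}
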
372 static void kimage_free_pages(struct page *page) in kimage_free_pages() argument
376 order = page_private(page); in kimage_free_pages()
379 ClearPageReserved(page + i); in kimage_free_pages()
380 __free_pages(page, order); in kimage_free_pages()
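
The kimage_free_pages() hits line up with a short helper; a minimal reconstruction based on the fragments listed (the count = 1 << order bookkeeping is an assumption, not shown in the hits):

static void kimage_free_pages(struct page *page)
{
        unsigned int order, count, i;

        /* The order was stashed via set_page_private() at allocation time. */
        order = page_private(page);
        count = 1 << order;
        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
}
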
388 struct page *page; in kimage_free_page_list() local
390 page = list_entry(pos, struct page, lru); in kimage_free_page_list()
391 list_del(&page->lru); in kimage_free_page_list()
392 kimage_free_pages(page); in kimage_free_page_list()
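
The kimage_free_page_list() hits show a list walk that deletes entries while iterating; a sketch of that walk (the list_for_each_safe() form is an assumption based on the pos cursor):

static void kimage_free_page_list(struct list_head *list)
{
        struct list_head *pos, *next;

        /* _safe variant: each iteration removes the current entry. */
        list_for_each_safe(pos, next, list) {
                struct page *page;

                page = list_entry(pos, struct page, lru);
                list_del(&page->lru);
                kimage_free_pages(page);
        }
}
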
396 static struct page *kimage_alloc_normal_control_pages(struct kimage *image, in kimage_alloc_normal_control_pages()
413 struct page *pages; in kimage_alloc_normal_control_pages()
462 static struct page *kimage_alloc_crash_control_pages(struct kimage *image, in kimage_alloc_crash_control_pages()
487 struct page *pages; in kimage_alloc_crash_control_pages()
526 struct page *kimage_alloc_control_pages(struct kimage *image, in kimage_alloc_control_pages()
529 struct page *pages = NULL; in kimage_alloc_control_pages()
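
kimage_alloc_control_pages() dispatches to the normal or crash variant shown in the hits above, depending on the image type. A minimal sketch of that dispatch; the KEXEC_TYPE_* switch is an assumption consistent with the two helpers listed:

struct page *kimage_alloc_control_pages(struct kimage *image,
                                        unsigned int order)
{
        struct page *pages = NULL;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                /* Normal kexec: control pages come from the page allocator. */
                pages = kimage_alloc_normal_control_pages(image, order);
                break;
        case KEXEC_TYPE_CRASH:
                /* Crash kernel: carve control pages out of the reserved
                 * crashkernel region instead. */
                pages = kimage_alloc_crash_control_pages(image, order);
                break;
        }

        return pages;
}
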
550 struct page *page; in kimage_add_entry() local
552 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); in kimage_add_entry()
553 if (!page) in kimage_add_entry()
556 ind_page = page_address(page); in kimage_add_entry()
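
The kimage_add_entry() hits show a fresh page being allocated and mapped when the current indirection page fills up. A hedged sketch of that path; the entry/last_entry bookkeeping around the allocation is assumed, not visible in the hits:

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
        if (*image->entry != 0)
                image->entry++;

        if (image->entry == image->last_entry) {
                kimage_entry_t *ind_page;
                struct page *page;

                /* Current indirection page is full: chain in a new one. */
                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
                if (!page)
                        return -ENOMEM;

                ind_page = page_address(page);
                *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                    ((PAGE_SIZE / sizeof(kimage_entry_t)) - 1);
        }
        *image->entry = entry;
        image->entry++;
        *image->entry = 0;          /* keep the list terminated */

        return 0;
}
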
583 static int kimage_add_page(struct kimage *image, unsigned long page) in kimage_add_page() argument
587 page &= PAGE_MASK; in kimage_add_page()
588 result = kimage_add_entry(image, page | IND_SOURCE); in kimage_add_page()
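
kimage_add_page() is a thin wrapper over kimage_add_entry(); a sketch assembled from the two hits above:

static int kimage_add_page(struct kimage *image, unsigned long page)
{
        int result;

        /* Drop any sub-page bits, then record the page as a source entry. */
        page &= PAGE_MASK;
        result = kimage_add_entry(image, page | IND_SOURCE);

        return result;
}
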
620 struct page *page; in kimage_free_entry() local
622 page = pfn_to_page(entry >> PAGE_SHIFT); in kimage_free_entry()
623 kimage_free_pages(page); in kimage_free_entry()
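
The kimage_free_entry() hits convert an entry back to a page and free it; a minimal sketch:

static void kimage_free_entry(kimage_entry_t entry)
{
        struct page *page;

        /* Entries store physical addresses plus IND_* flag bits, so the
         * pfn is just the address shifted down. */
        page = pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
}
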
661 unsigned long page) in kimage_dst_used() argument
670 if (page == destination) in kimage_dst_used()
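
kimage_dst_used() scans the image's entry list to see whether a given physical address is already claimed as some source page's destination. A sketch of that scan; for_each_kimage_entry() and the IND_* bookkeeping are assumptions not visible in the hits:

static kimage_entry_t *kimage_dst_used(struct kimage *image,
                                       unsigned long page)
{
        kimage_entry_t *ptr, entry;
        unsigned long destination = 0;

        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION)
                        destination = entry & PAGE_MASK;
                else if (entry & IND_SOURCE) {
                        if (page == destination)
                                return ptr;
                        destination += PAGE_SIZE;
                }
        }

        return NULL;
}
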
679 static struct page *kimage_alloc_page(struct kimage *image, in kimage_alloc_page()
701 struct page *page; in kimage_alloc_page() local
708 list_for_each_entry(page, &image->dest_pages, lru) { in kimage_alloc_page()
709 addr = page_to_pfn(page) << PAGE_SHIFT; in kimage_alloc_page()
711 list_del(&page->lru); in kimage_alloc_page()
712 return page; in kimage_alloc_page()
715 page = NULL; in kimage_alloc_page()
720 page = kimage_alloc_pages(gfp_mask, 0); in kimage_alloc_page()
721 if (!page) in kimage_alloc_page()
724 if (page_to_pfn(page) > in kimage_alloc_page()
726 list_add(&page->lru, &image->unuseable_pages); in kimage_alloc_page()
729 addr = page_to_pfn(page) << PAGE_SHIFT; in kimage_alloc_page()
749 struct page *old_page; in kimage_alloc_page()
753 copy_highpage(page, old_page); in kimage_alloc_page()
766 page = old_page; in kimage_alloc_page()
773 list_add(&page->lru, &image->dest_pages); in kimage_alloc_page()
777 return page; in kimage_alloc_page()
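
The kimage_alloc_page() hits outline the allocator at the heart of kexec loading: reuse a stashed destination page when one matches, park pages above the addressable limit, and swap contents with copy_highpage() when an allocation lands on an address another segment already owns. A condensed sketch of that flow; kimage_is_destination_range(), the entry rewrite, and error handling are simplified or assumed, and the highmem corner case is elided:

static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long destination)
{
        struct page *page;
        unsigned long addr;

        /* First, reuse a previously allocated page that already sits at
         * the requested destination address. */
        list_for_each_entry(page, &image->dest_pages, lru) {
                addr = page_to_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
                }
        }

        page = NULL;
        while (1) {
                kimage_entry_t *old;

                page = kimage_alloc_pages(gfp_mask, 0);
                if (!page)
                        return NULL;

                /* Pages above the kexec source limit are unusable; park
                 * them so the allocator doesn't hand them back again. */
                if (page_to_pfn(page) >
                    (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unuseable_pages);
                        continue;
                }
                addr = page_to_pfn(page) << PAGE_SHIFT;

                /* Usable if it is the wanted destination, or not inside any
                 * destination range at all (check assumed, not in the hits). */
                if (addr == destination ||
                    !kimage_is_destination_range(image, addr, addr + PAGE_SIZE))
                        break;

                /* The page sits on some other segment's destination.  If a
                 * source page already claimed that destination, swap the two
                 * with copy_highpage() and return the displaced page. */
                old = kimage_dst_used(image, addr);
                if (old) {
                        unsigned long old_addr = *old & PAGE_MASK;
                        struct page *old_page;

                        old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);
                        page = old_page;
                        break;
                }

                /* Otherwise keep it on dest_pages for later and try again. */
                list_add(&page->lru, &image->dest_pages);
        }
        return page;
}
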
799 struct page *page; in kimage_load_normal_segment() local
803 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); in kimage_load_normal_segment()
804 if (!page) { in kimage_load_normal_segment()
808 result = kimage_add_page(image, page_to_pfn(page) in kimage_load_normal_segment()
813 ptr = kmap(page); in kimage_load_normal_segment()
826 kunmap(page); in kimage_load_normal_segment()
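
The kimage_load_normal_segment() hits come from its per-page copy loop. A condensed sketch of that loop; maddr/mbytes/ubytes/buf track the segment's destination address, sizes, and user buffer (set up earlier in the function, not shown), and the chunk arithmetic and error paths are assumptions:

        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;
                int result;

                /* Get a page that can stand in for physical address maddr. */
                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                if (!page)
                        return -ENOMEM;

                /* Record it in the image's entry list as an IND_SOURCE page. */
                result = kimage_add_page(image,
                                         page_to_pfn(page) << PAGE_SHIFT);
                if (result < 0)
                        return result;

                /* Copy the next chunk of the user-supplied segment into it. */
                ptr = kmap(page);
                clear_page(ptr);                /* start from a clean page */
                ptr += maddr & ~PAGE_MASK;
                mchunk = min_t(size_t, mbytes,
                               PAGE_SIZE - (maddr & ~PAGE_MASK));
                uchunk = min(ubytes, mchunk);
                if (copy_from_user(ptr, buf, uchunk)) {
                        kunmap(page);
                        return -EFAULT;
                }
                kunmap(page);

                ubytes -= uchunk;
                maddr  += mchunk;
                buf    += mchunk;
                mbytes -= mchunk;
        }
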
858 struct page *page; in kimage_load_crash_segment() local
862 page = pfn_to_page(maddr >> PAGE_SHIFT); in kimage_load_crash_segment()
863 if (!page) { in kimage_load_crash_segment()
867 ptr = kmap(page); in kimage_load_crash_segment()
880 kexec_flush_icache_page(page); in kimage_load_crash_segment()
881 kunmap(page); in kimage_load_crash_segment()
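
The kimage_load_crash_segment() hits show the crash-kernel variant of the same loop: the crash region is already reserved at fixed physical addresses, so the page is looked up with pfn_to_page() rather than allocated, and the instruction cache is flushed after the copy. A condensed sketch under the same assumptions as above:

        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;
                int result;

                page = pfn_to_page(maddr >> PAGE_SHIFT);
                if (!page)
                        return -ENOMEM;

                ptr = kmap(page);
                ptr += maddr & ~PAGE_MASK;
                mchunk = min_t(size_t, mbytes,
                               PAGE_SIZE - (maddr & ~PAGE_MASK));
                uchunk = min(ubytes, mchunk);
                if (mchunk > uchunk)
                        memset(ptr + uchunk, 0, mchunk - uchunk); /* pad tail */
                result = copy_from_user(ptr, buf, uchunk);
                kexec_flush_icache_page(page);  /* keep I-cache coherent */
                kunmap(page);
                if (result)
                        return -EFAULT;

                ubytes -= uchunk;
                maddr  += mchunk;
                buf    += mchunk;
                mbytes -= mchunk;
        }
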
1386 VMCOREINFO_STRUCT_SIZE(page); in crash_save_vmcoreinfo_init()
1392 VMCOREINFO_OFFSET(page, flags); in crash_save_vmcoreinfo_init()
1393 VMCOREINFO_OFFSET(page, _count); in crash_save_vmcoreinfo_init()
1394 VMCOREINFO_OFFSET(page, mapping); in crash_save_vmcoreinfo_init()
1395 VMCOREINFO_OFFSET(page, lru); in crash_save_vmcoreinfo_init()
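
The crash_save_vmcoreinfo_init() hits export the layout of struct page into the vmcoreinfo note so post-mortem tools (e.g. makedumpfile) can walk memory in a crash dump. A small userspace illustration of the idea, using a hypothetical stand-in struct rather than the real struct page; the field layout and the exact key format are assumptions, not taken from the file:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct page, just to demonstrate the idea. */
struct page_demo {
        unsigned long flags;
        int _count;
        void *mapping;
        struct { void *next, *prev; } lru;
};

int main(void)
{
        /* Analogous to VMCOREINFO_STRUCT_SIZE(page) and
         * VMCOREINFO_OFFSET(page, field): record sizes/offsets as text. */
        printf("SIZE(page)=%zu\n", sizeof(struct page_demo));
        printf("OFFSET(page.flags)=%zu\n", offsetof(struct page_demo, flags));
        printf("OFFSET(page._count)=%zu\n", offsetof(struct page_demo, _count));
        printf("OFFSET(page.mapping)=%zu\n", offsetof(struct page_demo, mapping));
        printf("OFFSET(page.lru)=%zu\n", offsetof(struct page_demo, lru));
        return 0;
}
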