Lines Matching refs:region
481 static void store_slot_info(struct mem_vector *region, unsigned long image_size) in store_slot_info() argument
488 slot_area.addr = region->start; in store_slot_info()
489 slot_area.num = (region->size - image_size) / in store_slot_info()
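The store_slot_info() fragments above record a usable region as one slot_area entry: the base address is the region start, and the slot count is the space left after the image divided by the alignment step (the divisor sits on a continuation line that does not match "region" and so is not listed). A minimal standalone sketch of that accounting follows; the struct layouts, MAX_SLOT_AREA and the CONFIG_PHYSICAL_ALIGN value are assumptions for illustration, not the kernel's definitions.

/* Sketch of the slot accounting shown above; constants and layouts assumed. */
#include <stdio.h>

#define CONFIG_PHYSICAL_ALIGN 0x200000UL	/* assumed 2 MiB alignment step */
#define MAX_SLOT_AREA 100

struct mem_vector { unsigned long start, size; };
struct slot_area  { unsigned long addr, num; };

static struct slot_area slot_areas[MAX_SLOT_AREA];
static int slot_area_index;
static unsigned long slot_max;

static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
	struct slot_area slot_area;

	if (slot_area_index == MAX_SLOT_AREA)
		return;

	/* Each CONFIG_PHYSICAL_ALIGN step that still leaves room for the
	 * decompressed image is one candidate load address. */
	slot_area.addr = region->start;
	slot_area.num = (region->size - image_size) / CONFIG_PHYSICAL_ALIGN + 1;

	slot_areas[slot_area_index++] = slot_area;
	slot_max += slot_area.num;
}

int main(void)
{
	struct mem_vector r = { .start = 0x1000000UL, .size = 0x4000000UL };

	store_slot_info(&r, 0x1000000UL);	/* 64 MiB region, 16 MiB image */
	printf("%lu slots starting at %#lx\n", slot_areas[0].num, slot_areas[0].addr);
	return 0;
}

Every slot counted here is one candidate physical load address that the KASLR chooser can later pick at random.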
503 process_gb_huge_pages(struct mem_vector *region, unsigned long image_size) in process_gb_huge_pages() argument
510 store_slot_info(region, image_size); in process_gb_huge_pages()
514 addr = ALIGN(region->start, PUD_SIZE); in process_gb_huge_pages()
516 if (addr < region->start + region->size) in process_gb_huge_pages()
517 size = region->size - (addr - region->start); in process_gb_huge_pages()
528 store_slot_info(region, image_size); in process_gb_huge_pages()
538 if (addr >= region->start + image_size) { in process_gb_huge_pages()
539 tmp.start = region->start; in process_gb_huge_pages()
540 tmp.size = addr - region->start; in process_gb_huge_pages()
544 size = region->size - (addr - region->start) - i * PUD_SIZE; in process_gb_huge_pages()
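The process_gb_huge_pages() fragments carve 1 GiB aligned chunks (PUD_SIZE) out of a region so they can stay free for GB huge pages: the start is aligned up, whole GiB pages are counted off, and the head before the aligned address and the tail after the skipped pages are kept only if each can still hold the image. A standalone sketch of that split, with the helper name, constants and the page-budget parameter assumed for illustration (the kernel tracks the budget in a global instead):

/* Sketch of the head/tail split around reserved 1 GiB pages; names assumed. */
#include <stdio.h>

#define PUD_SIZE (1UL << 30)				/* 1 GiB */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct mem_vector { unsigned long start, size; };

static void split_around_gb_pages(struct mem_vector *region,
				  unsigned long image_size,
				  unsigned long gb_pages_wanted)
{
	unsigned long addr, size = 0, i = 0;

	addr = ALIGN(region->start, PUD_SIZE);
	if (addr < region->start + region->size)
		size = region->size - (addr - region->start);

	/* Count how many whole 1 GiB pages fit after the aligned address. */
	while (size > PUD_SIZE && gb_pages_wanted) {
		size -= PUD_SIZE;
		gb_pages_wanted--;
		i++;
	}
	if (!i) {
		printf("no GB page carved, keep whole region\n");
		return;
	}

	/* Head before the first reserved GiB page, if it can hold the image. */
	if (addr >= region->start + image_size)
		printf("head: %#lx +%#lx\n", region->start, addr - region->start);

	/* Tail after the reserved GiB pages, if it can hold the image. */
	size = region->size - (addr - region->start) - i * PUD_SIZE;
	if (size >= image_size)
		printf("tail: %#lx +%#lx\n", addr + i * PUD_SIZE, size);
}

int main(void)
{
	struct mem_vector r = { .start = 0x30000000UL, .size = 3UL << 30 };

	split_around_gb_pages(&r, 16UL << 20, 1);	/* 16 MiB image, 1 GiB page */
	return 0;
}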
580 struct mem_vector region, overlap; in __process_mem_region() local
599 region.start = cur_entry.start; in __process_mem_region()
600 region.size = cur_entry.size; in __process_mem_region()
604 start_orig = region.start; in __process_mem_region()
607 if (region.start < minimum) in __process_mem_region()
608 region.start = minimum; in __process_mem_region()
611 region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN); in __process_mem_region()
614 if (region.start > cur_entry.start + cur_entry.size) in __process_mem_region()
618 region.size -= region.start - start_orig; in __process_mem_region()
622 region.start + region.size > KERNEL_IMAGE_SIZE) in __process_mem_region()
623 region.size = KERNEL_IMAGE_SIZE - region.start; in __process_mem_region()
626 if (region.size < image_size) in __process_mem_region()
630 if (!mem_avoid_overlap(&region, &overlap)) { in __process_mem_region()
631 process_gb_huge_pages(&region, image_size); in __process_mem_region()
636 if (overlap.start > region.start + image_size) { in __process_mem_region()
639 beginning.start = region.start; in __process_mem_region()
640 beginning.size = overlap.start - region.start; in __process_mem_region()
645 if (overlap.start + overlap.size >= region.start + region.size) in __process_mem_region()
649 region.size -= overlap.start - region.start + overlap.size; in __process_mem_region()
650 region.start = overlap.start + overlap.size; in __process_mem_region()
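The __process_mem_region() fragments implement the main clipping loop: raise the start to the minimum and the alignment, shrink the size by however far the start moved, then either hand the whole region on (no overlap with an avoided range) or keep the part before the overlap and restart just past it. A standalone sketch under assumed names; the avoid list, process_region() and the printed output stand in for mem_avoid[], the slot-storing calls and the 32-bit KERNEL_IMAGE_SIZE clamp, which is omitted here.

/* Sketch of the clip-and-retry loop; avoid list and names are assumptions. */
#include <stdbool.h>
#include <stdio.h>

#define CONFIG_PHYSICAL_ALIGN 0x200000UL		/* assumed 2 MiB */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct mem_vector { unsigned long start, size; };

/* Hypothetical avoid list standing in for mem_avoid[]. */
static const struct mem_vector avoid[] = {
	{ .start = 0x2000000UL, .size = 0x400000UL },
};

/* Report the first avoided range that intersects the candidate region. */
static bool mem_avoid_overlap(const struct mem_vector *img, struct mem_vector *overlap)
{
	for (unsigned i = 0; i < sizeof(avoid) / sizeof(avoid[0]); i++) {
		if (avoid[i].start < img->start + img->size &&
		    img->start < avoid[i].start + avoid[i].size) {
			*overlap = avoid[i];
			return true;
		}
	}
	return false;
}

static void process_region(struct mem_vector entry, unsigned long minimum,
			   unsigned long image_size)
{
	struct mem_vector region = entry, overlap;
	unsigned long start_orig;

	for (;;) {
		start_orig = region.start;

		/* Raise the start to the minimum, then to the alignment. */
		if (region.start < minimum)
			region.start = minimum;
		region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

		/* Raised past the end of the original entry? */
		if (region.start > entry.start + entry.size)
			return;

		/* Shrink by however far the start moved forward. */
		region.size -= region.start - start_orig;

		/* Too small to hold the decompressed kernel? */
		if (region.size < image_size)
			return;

		/* Nothing in the way: the whole region is usable. */
		if (!mem_avoid_overlap(&region, &overlap)) {
			printf("usable: %#lx +%#lx\n", region.start, region.size);
			return;
		}

		/* The part before the overlap may still hold the image. */
		if (overlap.start > region.start + image_size)
			printf("usable: %#lx +%#lx\n", region.start,
			       overlap.start - region.start);

		/* Overlap reaches the end of the region: nothing left. */
		if (overlap.start + overlap.size >= region.start + region.size)
			return;

		/* Clip off everything up to the end of the overlap and retry. */
		region.size -= overlap.start - region.start + overlap.size;
		region.start = overlap.start + overlap.size;
	}
}

int main(void)
{
	struct mem_vector entry = { .start = 0x1000000UL, .size = 0x8000000UL };

	process_region(entry, 0x1000000UL, 0x1000000UL);
	return 0;
}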
654 static bool process_mem_region(struct mem_vector *region, in process_mem_region() argument
664 __process_mem_region(region, minimum, image_size); in process_mem_region()
682 if (!mem_overlaps(region, &immovable_mem[i])) in process_mem_region()
687 region_end = region->start + region->size; in process_mem_region()
689 entry.start = clamp(region->start, start, end); in process_mem_region()
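The process_mem_region() fragments show the immovable-memory filter: when no immovable ranges are known the region is passed through unchanged, otherwise it is intersected with each immovable range by clamping both region ends into that range, and only intersections that can still hold the image are processed further. A sketch of just the clamping path, with the immovable list and helper names assumed for illustration:

/* Sketch of intersecting a region with immovable ranges; names assumed. */
#include <stdio.h>

struct mem_vector { unsigned long start, size; };

/* Hypothetical stand-in for immovable_mem[]. */
static const struct mem_vector immovable[] = {
	{ .start = 0x40000000UL, .size = 0x20000000UL },
};

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static void process_mem_region(const struct mem_vector *region,
			       unsigned long image_size)
{
	unsigned long region_end = region->start + region->size;

	for (unsigned i = 0; i < sizeof(immovable) / sizeof(immovable[0]); i++) {
		unsigned long start = immovable[i].start;
		unsigned long end = start + immovable[i].size;
		struct mem_vector entry;
		unsigned long entry_end;

		/* Skip immovable ranges that do not touch this region at all. */
		if (end <= region->start || start >= region_end)
			continue;

		/* Clamp both region ends into the immovable range. */
		entry.start = clamp_ul(region->start, start, end);
		entry_end   = clamp_ul(region_end, start, end);

		/* Only keep intersections that can still hold the image. */
		if (entry.start + image_size < entry_end) {
			entry.size = entry_end - entry.start;
			printf("candidate: %#lx +%#lx\n", entry.start, entry.size);
		}
	}
}

int main(void)
{
	struct mem_vector r = { .start = 0x30000000UL, .size = 0x30000000UL };

	process_mem_region(&r, 0x1000000UL);
	return 0;
}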
714 struct mem_vector region; in process_efi_entries() local
767 region.start = md->phys_addr; in process_efi_entries()
768 region.size = md->num_pages << EFI_PAGE_SHIFT; in process_efi_entries()
769 if (process_mem_region(&region, minimum, image_size)) in process_efi_entries()
786 struct mem_vector region; in process_e820_entries() local
795 region.start = entry->addr; in process_e820_entries()
796 region.size = entry->size; in process_e820_entries()
797 if (process_mem_region(&region, minimum, image_size)) in process_e820_entries()
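The last two walkers differ only in how a firmware entry becomes a mem_vector: an EFI memory descriptor carries a page count, so its size is scaled by EFI_PAGE_SHIFT (4 KiB pages), while an e820 entry already stores a byte size. A combined sketch with assumed struct layouts and a stub consumer standing in for process_mem_region():

/* Sketch of converting firmware memory entries to regions; layouts assumed. */
#include <stdio.h>

#define EFI_PAGE_SHIFT 12	/* EFI memory descriptors count 4 KiB pages */

struct mem_vector { unsigned long start, size; };
struct efi_memory_desc { unsigned long phys_addr, num_pages; };
struct boot_e820_entry { unsigned long addr, size; };

/* Stand-in for the real consumer; returns nonzero to stop the scan. */
static int process_mem_region(struct mem_vector *region)
{
	printf("region %#lx +%#lx\n", region->start, region->size);
	return 0;
}

int main(void)
{
	struct efi_memory_desc md = { .phys_addr = 0x100000UL, .num_pages = 0x8000UL };
	struct boot_e820_entry e820 = { .addr = 0x40000000UL, .size = 0x40000000UL };
	struct mem_vector region;

	/* EFI path: pages are scaled to bytes (0x8000 pages = 128 MiB). */
	region.start = md.phys_addr;
	region.size = md.num_pages << EFI_PAGE_SHIFT;
	if (process_mem_region(&region))
		return 0;

	/* e820 path: addr and size are already bytes. */
	region.start = e820.addr;
	region.size = e820.size;
	process_mem_region(&region);
	return 0;
}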