
Lines Matching +full:start +full:- +full:up

27 #include <asm/text-patching.h>
43 * WC and WT fall back to UC-. pat_init() updates these values to support
45 * for the details. Note, __early_ioremap() used during early boot-time
82 * Check that the write-protect PAT entry is set for write-protect.
83 * To do this without making assumptions how PAT has been set up (Xen has
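The two hits above (file lines 82-83) describe verifying that the write-protect PAT entry really maps to write-protect without assuming any particular PAT layout (Xen lays it out differently). A minimal sketch of such a round-trip check; the translation tables and the __pte2cm_idx() helper are assumed from the surrounding file, not taken from the matches:

    /* Sketch: WP -> protection bits -> cache mode must round-trip to WP. */
    static bool x86_has_pat_wp(void)
    {
        uint16_t prot = __cachemode2pte_tbl[_PAGE_CACHE_MODE_WP];

        return __pte2cachemode_tbl[__pte2cm_idx(prot)] == _PAGE_CACHE_MODE_WP;
    }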
169 * the 0-ISA_END_ADDRESS range and secondly for the initial PMD_SIZE mapping.
206 unsigned long start; member
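That "member" hit is the range descriptor used by the mapping code below; a plausible definition consistent with the save_mr() and split_mem_range() matches (the fields beyond "start" are assumptions):

    struct map_range {
        unsigned long start;            /* physical start address */
        unsigned long end;              /* physical end address (exclusive) */
        unsigned page_size_mask;        /* allowed PG_LEVEL_* page sizes */
    };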
216 * up after us can get the correct flags. Invoked on the boot CPU.
251 /* Except when with PTI where the kernel is mostly non-Global: */ in probe_page_size_mask()
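The probe_page_size_mask() hits concern the Global bit: the computed PTE mask is saved so CPUs brought up later get the correct flags, except that PTI strips _PAGE_GLOBAL from the kernel default. A sketch of that tail of the function, filled in by assumption:

    /* Save the mask so CPUs coming up later get the correct flags. */
    __default_kernel_pte_mask = __supported_pte_mask;

    /* Except when with PTI, where the kernel is mostly non-Global: */
    if (cpu_feature_enabled(X86_FEATURE_PTI))
        __default_kernel_pte_mask &= ~_PAGE_GLOBAL;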
291 boot_cpu_data.microcode < invlpg_miss_match->driver_data) { in setup_pcid()
299 * This can't be cr4_set_bits_and_update_boot() -- the in setup_pcid()
306 * Instead, we brute-force it and set CR4.PCIDE manually in in setup_pcid()
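The setup_pcid() hits show two decisions: refuse PCID when the microcode predates the INVLPG-miss erratum fix, and enable it by writing CR4.PCIDE directly because cr4_set_bits_and_update_boot() would not keep the bit set through the secondary-boot path. A hedged reconstruction; the invlpg_miss_ids table and the message text are assumptions:

    static void setup_pcid(void)
    {
        const struct x86_cpu_id *invlpg_miss_match;

        if (!IS_ENABLED(CONFIG_X86_64) || !boot_cpu_has(X86_FEATURE_PCID))
            return;

        /* Buggy microcode drops global TLB entries on INVLPG: skip PCID. */
        invlpg_miss_match = x86_match_cpu(invlpg_miss_ids);
        if (invlpg_miss_match &&
            boot_cpu_data.microcode < invlpg_miss_match->driver_data) {
            pr_info("Incomplete global flushes, disabling PCID");
            setup_clear_cpu_cap(X86_FEATURE_PCID);
            return;
        }

        if (boot_cpu_has(X86_FEATURE_PGE)) {
            /*
             * This can't be cr4_set_bits_and_update_boot() -- the bit
             * would not survive the secondary-boot asm. Instead,
             * brute-force it: set CR4.PCIDE manually here and in
             * start_secondary().
             */
            cr4_set_bits(X86_CR4_PCIDE);
        }
    }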
335 mr[nr_range].start = start_pfn<<PAGE_SHIFT; in save_mr()
356 unsigned long start = round_down(mr[i].start, PMD_SIZE); in adjust_range_page_size_mask() local
364 if (memblock_is_region_memory(start, end - start)) in adjust_range_page_size_mask()
369 unsigned long start = round_down(mr[i].start, PUD_SIZE); in adjust_range_page_size_mask() local
372 if (memblock_is_region_memory(start, end - start)) in adjust_range_page_size_mask()
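These adjust_range_page_size_mask() hits probe whether an unaligned range may still use large pages: round the range out to PMD_SIZE (resp. PUD_SIZE) and, if memblock confirms the rounded-out span is entirely RAM, set the matching PG_LEVEL bit. A sketch of the 2M half of the loop, with surrounding declarations assumed:

    for (i = 0; i < nr_range; i++) {
        if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
            !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
            unsigned long start = round_down(mr[i].start, PMD_SIZE);
            unsigned long end = round_up(mr[i].end, PMD_SIZE);

            /* Promote only if the rounded-out span is all RAM. */
            if (memblock_is_region_memory(start, end - start))
                mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
        }
        /* The PG_LEVEL_1G case repeats this with PUD_SIZE. */
    }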
385 if (mr->page_size_mask & (1<<PG_LEVEL_1G)) in page_size_string()
388 * 32-bit without PAE has a 4M large page size. in page_size_string()
394 mr->page_size_mask & (1<<PG_LEVEL_2M)) in page_size_string()
397 if (mr->page_size_mask & (1<<PG_LEVEL_2M)) in page_size_string()
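page_size_string() is a priority chain over the mask bits, with the quirk the comment fragment names: 32-bit non-PAE kernels have 4M large pages, so the (there misnamed) PG_LEVEL_2M bit prints as "4M". A sketch consistent with the matches:

    static const char *page_size_string(struct map_range *mr)
    {
        if (mr->page_size_mask & (1<<PG_LEVEL_1G))
            return "1G";
        /* 32-bit without PAE has a 4M large page size. */
        if (IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_X86_PAE) &&
            mr->page_size_mask & (1<<PG_LEVEL_2M))
            return "4M";
        if (mr->page_size_mask & (1<<PG_LEVEL_2M))
            return "2M";
        return "4k";
    }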
404 unsigned long start, in split_mem_range() argument
414 pfn = start_pfn = PFN_DOWN(start); in split_mem_range()
482 for (i = 0; nr_range > 1 && i < nr_range - 1; i++) { in split_mem_range()
484 if (mr[i].end != mr[i+1].start || in split_mem_range()
488 old_start = mr[i].start; in split_mem_range()
490 (nr_range - 1 - i) * sizeof(struct map_range)); in split_mem_range()
491 mr[i--].start = old_start; in split_mem_range()
492 nr_range--; in split_mem_range()
496 pr_debug(" [mem %#010lx-%#010lx] page %s\n", in split_mem_range()
497 mr[i].start, mr[i].end - 1, in split_mem_range()
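Most split_mem_range() hits belong to its merge pass: adjacent entries whose end/start touch and whose page_size_mask matches are collapsed with memmove(), and the i-- re-checks the merged slot against its new neighbour. Reassembled from the matched lines; the skip condition is completed by assumption:

    /* Try to merge contiguous ranges that share a page size. */
    for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
        unsigned long old_start;

        if (mr[i].end != mr[i+1].start ||
            mr[i].page_size_mask != mr[i+1].page_size_mask)
            continue;
        /* Slide the tail down over entry i, keeping i's start. */
        old_start = mr[i].start;
        memmove(&mr[i], &mr[i+1],
            (nr_range - 1 - i) * sizeof(struct map_range));
        mr[i--].start = old_start;  /* re-check the merged slot */
        nr_range--;
    }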
514 if (start_pfn < (1UL<<(32-PAGE_SHIFT))) in add_pfn_range_mapped()
516 min(end_pfn, 1UL<<(32-PAGE_SHIFT))); in add_pfn_range_mapped()
524 if ((start_pfn >= pfn_mapped[i].start) && in pfn_range_is_mapped()
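The add_pfn_range_mapped() hit shows the below-4G bookkeeping (max_low_pfn_mapped only grows up to pfn 1UL<<(32-PAGE_SHIFT)), and pfn_range_is_mapped() then answers containment queries against the recorded ranges. A sketch of the lookup; pfn_mapped[] and nr_pfn_mapped are assumed from context:

    bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
    {
        int i;

        /* Mapped iff some recorded range fully contains [start, end). */
        for (i = 0; i < nr_pfn_mapped; i++)
            if ((start_pfn >= pfn_mapped[i].start) &&
                (end_pfn <= pfn_mapped[i].end))
                return true;

        return false;
    }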
536 unsigned long __ref init_memory_mapping(unsigned long start, in init_memory_mapping() argument
543 pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n", in init_memory_mapping()
544 start, end - 1); in init_memory_mapping()
547 nr_range = split_mem_range(mr, 0, start, end); in init_memory_mapping()
550 ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, in init_memory_mapping()
554 add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT); in init_memory_mapping()
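Pieced together, the init_memory_mapping() hits give nearly the whole function: split [start, end) into page-size-homogeneous ranges, hand each to kernel_physical_mapping_init(), then record what was mapped. A reconstruction under that reading; the local declarations are assumptions:

    unsigned long __ref init_memory_mapping(unsigned long start,
                                            unsigned long end, pgprot_t prot)
    {
        struct map_range mr[NR_RANGE_MR];
        unsigned long ret = 0;
        int nr_range, i;

        pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
                 start, end - 1);

        memset(mr, 0, sizeof(mr));
        nr_range = split_mem_range(mr, 0, start, end);

        for (i = 0; i < nr_range; i++)
            ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
                                               mr[i].page_size_mask, prot);

        add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

        return ret >> PAGE_SHIFT;
    }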
581 u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end); in init_range_memory_mapping() local
583 if (start >= end) in init_range_memory_mapping()
590 can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >= in init_range_memory_mapping()
592 init_memory_mapping(start, end, PAGE_KERNEL); in init_range_memory_mapping()
593 mapped_ram_size += end - start; in init_range_memory_mapping()
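init_range_memory_mapping() walks every memblock pfn range, clamps it to the requested window, and clears can_use_brk_pgt whenever the range overlaps the brk page-table buffer, forcing page tables for that range to come from memblock instead. A sketch assembled around the visible lines; the declarations are assumptions:

    static unsigned long __init
    init_range_memory_mapping(unsigned long r_start, unsigned long r_end)
    {
        unsigned long start_pfn, end_pfn, mapped_ram_size = 0;
        int i;

        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
            u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
            u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);

            if (start >= end)
                continue;

            /* Overlapping the brk pgt buffer? Allocate from memblock. */
            can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
                              min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
            init_memory_mapping(start, end, PAGE_KERNEL);
            mapped_ram_size += end - start;
            can_use_brk_pgt = true;
        }

        return mapped_ram_size;
    }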
610 * Don't need to worry about overflow in the top-down case, on 32bit, in get_new_step_size()
611 * when step_size is 0, round_down() returns 0 for start, and that in get_new_step_size()
613 * In the bottom-up case, round_up(x, 0) returns 0 though too, which in get_new_step_size()
616 return step_size << (PMD_SHIFT - PAGE_SHIFT - 1); in get_new_step_size()
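The -1 in the shift is what the overflow comments are about: growing by a full PTRS_PER_PTE would let round_up()/round_down() wrap, so the step grows by half that. A worked example; the concrete constants are for x86-64 with 4K pages and are not from the listing:

    /*
     * x86-64, 4K pages: PMD_SHIFT = 21, PAGE_SHIFT = 12, so the shift is
     * 21 - 12 - 1 = 8, i.e. each call multiplies step_size by
     * 256 = PTRS_PER_PTE / 2.  Starting from PMD_SIZE:
     *     2 MiB -> 512 MiB -> 128 GiB -> ...
     * The halved growth leaves headroom so round_up()/round_down() on the
     * next step can never overflow or collapse to 0.
     */
    static unsigned long __init get_new_step_size(unsigned long step_size)
    {
        return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
    }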
620 * memory_map_top_down - Map [map_start, map_end) top down
621 * @map_start: start address of the target memory range
625 * [map_start, map_end) in top-down. That said, the page tables
627 * memory in top-down.
641 * Start with top-most PMD_SIZE range aligned at PMD_SIZE to ensure in memory_map_top_down()
662 * We start from the top (end of memory) and go to the bottom. in memory_map_top_down()
668 unsigned long start; in memory_map_top_down() local
671 start = round_down(last_start - 1, step_size); in memory_map_top_down()
672 if (start < map_start) in memory_map_top_down()
673 start = map_start; in memory_map_top_down()
675 start = map_start; in memory_map_top_down()
676 mapped_ram_size += init_range_memory_mapping(start, in memory_map_top_down()
678 last_start = start; in memory_map_top_down()
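The top-down loop is nearly complete in the hits: peel an aligned window off the top, map it via init_range_memory_mapping(), then widen step_size once enough RAM is mapped to host the next round's page tables. Filled out by assumption:

    while (last_start > map_start) {
        unsigned long start;

        if (last_start > step_size) {
            /* Peel one aligned window off the top... */
            start = round_down(last_start - 1, step_size);
            if (start < map_start)
                start = map_start;
        } else
            start = map_start;      /* ...or take whatever is left. */

        mapped_ram_size += init_range_memory_mapping(start, last_start);
        last_start = start;
        min_pfn_mapped = last_start >> PAGE_SHIFT;

        if (mapped_ram_size >= step_size)
            step_size = get_new_step_size(step_size);
    }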
689 * memory_map_bottom_up - Map [map_start, map_end) bottom up
690 * @map_start: start address of the target memory range
694 * [map_start, map_end) in bottom-up. Since we have limited the
695 * bottom-up allocation above the kernel, the page tables will
697 * in [map_start, map_end) in bottom-up.
702 unsigned long next, start; in memory_map_bottom_up() local
707 start = map_start; in memory_map_bottom_up()
708 min_pfn_mapped = start >> PAGE_SHIFT; in memory_map_bottom_up()
711 * We start from the bottom (@map_start) and go to the top (@map_end). in memory_map_bottom_up()
716 while (start < map_end) { in memory_map_bottom_up()
717 if (step_size && map_end - start > step_size) { in memory_map_bottom_up()
718 next = round_up(start + 1, step_size); in memory_map_bottom_up()
725 mapped_ram_size += init_range_memory_mapping(start, next); in memory_map_bottom_up()
726 start = next; in memory_map_bottom_up()
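memory_map_bottom_up() mirrors that loop from the other end: the window climbs from @map_start, round_up(start + 1, step_size) bounds each step, and step_size grows geometrically the same way. Filled out by assumption:

    while (start < map_end) {
        if (step_size && map_end - start > step_size) {
            next = round_up(start + 1, step_size);
            if (next > map_end)
                next = map_end;
        } else {
            next = map_end;
        }

        mapped_ram_size += init_range_memory_mapping(start, next);
        start = next;

        if (mapped_ram_size >= step_size)
            step_size = get_new_step_size(step_size);
    }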
742 * area. This limits the randomization granularity to 1GB for both 4-level
743 * and 5-level paging.
749 * The code below will alias kernel page-tables in the user-range of the in init_trampoline()
751 * be created when using the trampoline page-table. in init_trampoline()
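The init_trampoline() comment explains that the trampoline page table aliases the kernel mapping, Global bit included, into the user half of the address space. A sketch of how the non-KASLR case can do that on 64-bit; init_top_pgt, trampoline_pgd_entry and init_trampoline_kaslr() are assumed from context:

    static void __init init_trampoline(void)
    {
        /*
         * Alias the kernel's direct-map PGD entry into the user range;
         * global TLB entries will then be created when running on the
         * trampoline page-table.
         */
        if (!kaslr_memory_enabled())
            trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
        else
            init_trampoline_kaslr();
    }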
781 * If the allocation is in bottom-up direction, we setup direct mapping in init_mem_mapping()
782 * in bottom-up, otherwise we setup direct mapping in top-down. in init_mem_mapping()
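The direction choice in init_mem_mapping() follows memblock's allocation direction so freshly allocated page tables always land in already-mapped memory: bottom-up maps the region above the kernel first, then finishes the low range top-down. A sketch; kernel_end and the ISA_END_ADDRESS floor are taken from typical usage, not from the matches:

    if (memblock_bottom_up()) {
        unsigned long kernel_end = __pa_symbol(_end);

        /* Map above the kernel bottom-up, then below it top-down. */
        memory_map_bottom_up(kernel_end, end);
        memory_map_top_down(ISA_END_ADDRESS, kernel_end);
    } else {
        memory_map_top_down(ISA_END_ADDRESS, end);
    }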
840 (TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE); in poking_init()
846 * We need to trigger the allocation of the page-tables that will be in poking_init()
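poking_init() randomizes the text-poking address inside [TASK_UNMAPPED_BASE, TASK_SIZE - 3 pages) and then pre-faults its page tables, because later pokes may run in an atomic section where allocation could fail. A sketch of those two steps; the poking_mm setup is elided and assumed:

    poking_addr = TASK_UNMAPPED_BASE;
    if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
        poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
            (TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);

    /*
     * Trigger allocation of the page-tables now; poking may later run
     * in an atomic section where allocation would be forbidden.
     */
    ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
    BUG_ON(!ptep);
    pte_unmap_unlock(ptep, ptl);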
864 * Access has to be given to non-kernel-ram areas as well, these contain the
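That line belongs to the /dev/mem policy: non-RAM areas (PCI MMIO, BIOS/ACPI data) must remain accessible while System RAM stays restricted. The real check in the file has more cases; this simplified helper, with a hypothetical name, only illustrates the shape of the RAM test:

    /* Hypothetical sketch: allow a page unless it intersects System RAM. */
    static int devmem_page_allowed(unsigned long pagenr)
    {
        return region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
                                 IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
               != REGION_INTERSECTS;
    }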
915 * mark them not present - any buggy init-section access will in free_init_pages()
919 pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n", in free_init_pages()
920 begin, end - 1); in free_init_pages()
925 kmemleak_free_part((void *)begin, end - begin); in free_init_pages()
926 set_memory_np(begin, (end - begin) >> PAGE_SHIFT); in free_init_pages()
931 * writeable and non-executable first. in free_init_pages()
933 set_memory_nx(begin, (end - begin) >> PAGE_SHIFT); in free_init_pages()
934 set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); in free_init_pages()
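free_init_pages() has two paths in the hits: with debug_pagealloc the init range is simply unmapped (after telling kmemleak about the hole), otherwise the freshly write-protected kernel text is first made writable and non-executable again so it can be freed. Reassembled, with the free_reserved_area() call assumed:

    if (debug_pagealloc_enabled()) {
        pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
            begin, end - 1);
        /* Tell kmemleak about the hole before unmapping the pages. */
        kmemleak_free_part((void *)begin, end - begin);
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
    } else {
        /*
         * Kernel text was just marked read-only; make the part being
         * freed writable and non-executable first.
         */
        set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

        free_reserved_area((void *)begin, (void *)end,
                           POISON_FREE_INITMEM, what);
    }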
950 unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT; in free_kernel_image_pages()
984 void __init free_initrd_mem(unsigned long start, unsigned long end) in free_initrd_mem() argument
990 * - i386_start_kernel() in free_initrd_mem()
991 * - x86_64_start_kernel() in free_initrd_mem()
992 * - relocate_initrd() in free_initrd_mem()
995 free_init_pages("initrd", start, PAGE_ALIGN(end)); in free_initrd_mem()
1001 * and pass it to the MM layer - to help it set zone watermarks more
1004 * Done on 64-bit systems only for the time being, although 32-bit systems
1025 nr_pages += end_pfn - start_pfn; in memblock_find_dma_reserve()
1039 nr_free_pages += end_pfn - start_pfn; in memblock_find_dma_reserve()
1042 set_dma_reserve(nr_pages - nr_free_pages); in memblock_find_dma_reserve()
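The DMA-reserve computation counts every page below MAX_DMA_PFN, subtracts those memblock still considers free, and passes the difference to set_dma_reserve() so zone watermarks account for early reservations. A sketch of the two passes; the iterator arguments are assumed:

    /* Pass 1: all pages (free and reserved) below MAX_DMA_PFN. */
    for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
        start_pfn = min(start_pfn, MAX_DMA_PFN);
        end_pfn = min(end_pfn, MAX_DMA_PFN);
        nr_pages += end_pfn - start_pfn;
    }

    /* Pass 2: only the still-free pages below MAX_DMA_PFN. */
    for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE,
                            &start_addr, &end_addr, NULL) {
        start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN);
        end_pfn = min_t(unsigned long, PFN_DOWN(end_addr), MAX_DMA_PFN);

        if (start_pfn < end_pfn)
            nr_free_pages += end_pfn - start_pfn;
    }

    set_dma_reserve(nr_pages - nr_free_pages);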
1069 .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
1079 /* entry 0 MUST be WB (hardwired to speed up translations) */ in update_cache_mode_entry()
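update_cache_mode_entry() keeps the two translation tables in sync when pat_init() rewrites an entry; the hit is its guard, since hardware hardwires entry 0 to WB. A sketch with the table names assumed as in the PAT check above:

    static void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
    {
        unsigned long cache_bits = __cachemode2pte_tbl[cache];

        /* entry 0 MUST be WB (hardwired to speed up translations) */
        BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);

        __cachemode2pte_tbl[entry] = cache_bits;
        __pte2cachemode_tbl[__pte2cm_idx(cache_bits)] = cache;
    }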
1101 l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT; in arch_max_swapfile_size()
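The last hit is the L1TF swap-size clamp: the pfn limit is scaled up because swap offsets also use the bits below SWP_OFFSET_FIRST_BIT, then the generic maximum is capped to it. A sketch of the clamp; the surrounding bug/mitigation checks are assumed:

    pages = generic_max_swapfile_size();

    if (boot_cpu_has_bug(X86_BUG_L1TF) &&
        l1tf_mitigation != L1TF_MITIGATION_OFF) {
        unsigned long long l1tf_limit = l1tf_pfn_limit();

        /* Offset bits below the pfn field raise the usable limit. */
        l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
        pages = min_t(unsigned long long, l1tf_limit, pages);
    }
    return pages;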