Lines matching "linear-mapping-mode"
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2020 FORTH-ICS/CARV
20 #include <linux/dma-map-ops.h>
99 pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld kB)\n", name, b, t, in print_mlk()
100 (((t) - (b)) >> LOG2_SZ_1K)); in print_mlk()
105 pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld MB)\n", name, b, t, in print_mlm()
106 (((t) - (b)) >> LOG2_SZ_1M)); in print_mlm()
111 pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld GB)\n", name, b, t, in print_mlg()
112 (((t) - (b)) >> LOG2_SZ_1G)); in print_mlg()
118 pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld TB)\n", name, b, t, in print_mlt()
119 (((t) - (b)) >> LOG2_SZ_1T)); in print_mlt()
127 unsigned long diff = t - b; in print_ml()
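Line 127 is the head of print_ml(), which picks one of the unit-specific helpers above based on the size of the range. A minimal sketch of such a dispatcher, assuming a >= 10 threshold so the printed value keeps at least two digits (only the helper names and the diff line are verbatim from the listing):

static void print_ml(char *name, unsigned long b, unsigned long t)
{
	unsigned long diff = t - b;

	if (IS_ENABLED(CONFIG_64BIT) && (diff >> LOG2_SZ_1T) >= 10)
		print_mlt(name, b, t);
	else if ((diff >> LOG2_SZ_1G) >= 10)
		print_mlg(name, b, t);
	else if ((diff >> LOG2_SZ_1M) >= 10)
		print_mlm(name, b, t);
	else
		print_mlk(name, b, t);
}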
218 * map the kernel in the linear mapping as read-only: we do not want in setup_bootmem()
222 vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK; in setup_bootmem()
226 memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); in setup_bootmem()
230 * at worst, we map the linear mapping with PMD mappings. in setup_bootmem()
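Line 222 uses the standard power-of-two round-up idiom: adding PMD_SIZE - 1 and masking with PMD_MASK aligns vmlinux_end up to the next 2 MiB PMD boundary, so the memblock_reserve() on line 226 covers whole PMD mappings. A self-contained illustration with hypothetical values:

/* Round x up to a power-of-two boundary a (what PMD_MASK does for PMD_SIZE): */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* With 2 MiB PMDs: ALIGN_UP(0x80345678UL, 0x200000UL) == 0x80400000UL */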
240 * In 64-bit, any use of __va/__pa before this point is wrong as we in setup_bootmem()
244 kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base; in setup_bootmem()
247 * The size of the linear page mapping may restrict the amount of in setup_bootmem()
253 max_mapped_addr - phys_ram_base); in setup_bootmem()
258 * addresses greater than (void *)(-PAGE_SIZE) because: in setup_bootmem()
259 * - This memory would overlap with ERR_PTR in setup_bootmem()
260 * - This memory belongs to high memory, which is not supported in setup_bootmem()
262 * This is not applicable to 64-bit kernel, because virtual addresses in setup_bootmem()
263 * after (void *)(-PAGE_SIZE) are not linearly mapped: they are in setup_bootmem()
264 * occupied by the kernel mapping. Also, it is unrealistic for high memory in setup_bootmem()
265 * to exist on 64-bit platforms. in setup_bootmem()
268 max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE); in setup_bootmem()
269 memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr); in setup_bootmem()
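The overlap called out on line 259 comes from the kernel's ERR_PTR convention: small negative errno values are cast to pointers, which places them in the last page of the address space, exactly the region that line 268 computes and line 269 reserves away. For example:

/* ENOMEM is 12, so on 32-bit: */
void *p = ERR_PTR(-ENOMEM);		/* == (void *)0xfffffff4, inside [-PAGE_SIZE, 0) */
long err = IS_ERR(p) ? PTR_ERR(p) : 0;	/* recovers -12 */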
278 set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); in setup_bootmem()
300 hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); in setup_bootmem()
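The argument to hugetlb_cma_reserve() on line 300 is the allocation order of one gigapage: with 4 KiB base pages and 1 GiB PUD mappings, PUD_SHIFT - PAGE_SHIFT = 30 - 12 = 18, i.e. each reserved CMA huge page is 2^18 contiguous base pages (1 GiB).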
456 BUG_ON((va - kernel_map.virt_addr) >> PUD_SHIFT); in alloc_pmd_early()
519 /* Only one PUD is available for early mapping */ in alloc_pud_early()
520 BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT); in alloc_pud_early()
557 /* Only one P4D is available for early mapping */ in alloc_p4d_early()
558 BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT); in alloc_p4d_early()
696 !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE) in best_map_size()
700 !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE) in best_map_size()
704 !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE) in best_map_size()
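Lines 696-704 are the alignment checks of best_map_size(), walking down the page-size ladder. A plausible reconstruction of the whole helper, assuming the surrounding control flow and the pgtable_l5_enabled/pgtable_l4_enabled guards (only the three matched condition lines are verbatim):

static phys_addr_t best_map_size(phys_addr_t pa, uintptr_t va, phys_addr_t size)
{
	if (pgtable_l5_enabled &&
	    !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
		return P4D_SIZE;

	if (pgtable_l4_enabled &&
	    !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
		return PUD_SIZE;

	if (IS_ENABLED(CONFIG_64BIT) &&
	    !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
		return PMD_SIZE;

	return PAGE_SIZE;
}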
719 size_t sz = (size_t)((uintptr_t)(&_end) - (uintptr_t)(&_sdata)); in __copy_data()
732 * In 64-bit kernel, the kernel mapping is outside the linear mapping so in pgprot_from_va()
733 * we must protect its linear mapping alias from being executed and in pgprot_from_va()
781 pr_info("Disabled 4-level and 5-level paging"); in print_no4lvl()
788 pr_info("Disabled 5-level paging"); in print_no5lvl()
794 * There is a simple way to determine whether 4-level paging is supported by the
795 * underlying hardware: establish a 1:1 mapping in 4-level page table mode
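The probe described on lines 794-795 works by round-tripping the satp CSR: write the candidate mode over an identity-mapped page table and read it back; hardware without that mode leaves satp unchanged. A simplified sketch of the check (the CSR accessors are the kernel's; the variable names here are illustrative):

/* Try to enable Sv48 over an identity mapping of this code: */
identity_satp = PFN_DOWN((uintptr_t)early_pg_dir) | SATP_MODE_48;
csr_write(CSR_SATP, identity_satp);
hw_satp = csr_swap(CSR_SATP, 0ULL);	/* read back, then turn the MMU off */

if (hw_satp != identity_satp)
	disable_pgtable_l4();		/* 4-level rejected: stay on Sv39 */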
858 * setup_vm() is called from head.S with the MMU off. in setup_vm()
862 * 1) It should use PC-relative addressing for accessing kernel symbols.
885 uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR; in relocate_kernel()
890 uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr; in relocate_kernel()
893 Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset); in relocate_kernel()
894 Elf64_Addr relocated_addr = rela->r_addend; in relocate_kernel()
896 if (rela->r_info != R_RISCV_RELATIVE) in relocate_kernel()
903 * mm->context.vdso in VDSO_OFFSET macro. in relocate_kernel()
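Lines 885-896 show the setup of relocate_kernel(): reloc_offset is the runtime-to-link delta, and each rela entry's r_offset is converted to an address the MMU-off code can write. What happens next to each R_RISCV_RELATIVE entry is, in sketch form (the final store is an assumption, not among the matched lines):

/* Rebase link-time addresses and patch the slot in place: */
if (relocated_addr >= KERNEL_LINK_ADDR)
	relocated_addr += reloc_offset;
*(Elf64_Addr *)addr = relocated_addr;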
923 kernel_map.xiprom + (va - kernel_map.virt_addr), in create_kernel_page_table()
930 kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)), in create_kernel_page_table()
941 kernel_map.phys_addr + (va - kernel_map.virt_addr), in create_kernel_page_table()
949 * Setup a 4MB mapping that encompasses the device tree: for 64-bit kernel,
950 * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
957 uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1); in create_fdt_early_page_table()
962 /* In 32-bit only, the fdt lies in its own PGD */ in create_fdt_early_page_table()
973 dtb_early_va = (void *)fix_fdt_va + (dtb_pa & (PMD_SIZE - 1)); in create_fdt_early_page_table()
976 * For 64-bit kernel, __va can't be used since it would return a linear in create_fdt_early_page_table()
977 * mapping address whereas dtb_early_va will be used before in create_fdt_early_page_table()
978 * setup_vm_final installs the linear mapping. For 32-bit kernel, as the in create_fdt_early_page_table()
979 * kernel is mapped in the linear mapping, that makes no difference. in create_fdt_early_page_table()
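Concretely, with 2 MiB PMDs and a hypothetical dtb_pa of 0x82201234: line 957 rounds down to pa = 0x82200000, that PMD plus the next one are mapped at fix_fdt_va (the 4 MB window from line 949), and line 973 re-adds the in-PMD offset:

pa           = 0x82201234 & ~(PMD_SIZE - 1);	/* = 0x82200000 */
dtb_early_va = (void *)fix_fdt_va
	     + (0x82201234 & (PMD_SIZE - 1));	/* = fix_fdt_va + 0x1234 */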
1008 * map the allocated physical pages since the linear mapping does not exist yet.
1069 u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start); in setup_vm()
1077 nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE; in setup_vm()
1092 kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom); in setup_vm()
1099 kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start); in setup_vm()
1101 kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom; in setup_vm()
1105 kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr; in setup_vm()
1113 * In 64-bit, we defer the setup of va_pa_offset to setup_bootmem, in setup_vm()
1116 * for the linear mapping. This is only possible because the kernel in setup_vm()
1117 * mapping lies outside the linear mapping. in setup_vm()
1118 * In 32-bit however, as the kernel resides in the linear mapping, in setup_vm()
1119 * setup_vm_final cannot change the mapping established here, in setup_vm()
1125 0UL : PAGE_OFFSET - kernel_map.phys_addr; in setup_vm()
1126 kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr; in setup_vm()
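The two offsets assigned on lines 1125-1126 are what the pa-to-va helpers consume later: one for the linear mapping (deferred on 64-bit, per the comment above) and one for the kernel image alias. A minimal sketch of the translations they enable, with hypothetical macro names (the real helpers live in the arch's page.h):

/* Linear mapping: any RAM address; on 64-bit valid only after setup_bootmem(). */
#define linear_pa_to_va(pa)	((void *)((unsigned long)(pa) + kernel_map.va_pa_offset))

/* Kernel mapping: the kernel image alias, usable from setup_vm() onward. */
#define kernel_pa_to_va(pa)	((void *)((unsigned long)(pa) + kernel_map.va_kernel_pa_offset))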
1129 * The default maximal physical memory size is KERN_VIRT_SIZE for 32-bit in setup_vm()
1130 * kernel, whereas for 64-bit kernel, the end of the virtual address in setup_vm()
1132 * the available size of the linear mapping. in setup_vm()
1134 memory_limit = KERN_VIRT_SIZE - (IS_ENABLED(CONFIG_64BIT) ? SZ_4G : 0); in setup_vm()
1145 BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K); in setup_vm()
1155 BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size); in setup_vm()
1206 /* Setup early mapping for FDT early scan */ in setup_vm()
1210 * Boot-time fixmap can only handle PMD_SIZE mappings. Thus, boot-ioremap in setup_vm()
1252 best_map_size(pa, va, end - pa); in create_linear_mapping_range()
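Line 1252 sits inside create_linear_mapping_range()'s per-range loop; a plausible shape of that loop, assuming the surrounding structure (only the best_map_size() call is verbatim). Each step maps the largest naturally aligned block that still fits, so a bank falls back to 2 MiB and then 4 KiB pages only at its unaligned edges:

for (pa = start; pa < end; pa += map_size) {
	va = (uintptr_t)__va(pa);
	map_size = best_map_size(pa, va, end - pa);
	create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
			   pgprot_from_va(va));
}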
1267 phys_addr_t ktext_size = __init_data_begin - _start; in create_linear_mapping_page_table()
1269 phys_addr_t krodata_size = _data - __start_rodata; in create_linear_mapping_page_table()
1279 * before we set up the linear mapping so that we avoid using hugepages in create_linear_mapping_page_table()
1289 /* Map all memory banks in the linear mapping */ in create_linear_mapping_page_table()
1323 * In 32-bit, the device tree lies in a pgd entry, so it must be copied in setup_vm_final()
1335 /* Map the linear mapping */ in setup_vm_final()
1380 return -ENOMEM; in reserve_crashkernel_low()
1383 pr_info("crashkernel low memory reserved: 0x%016llx - 0x%016llx (%lld MB)\n", in reserve_crashkernel_low()
1387 crashk_low_res.end = low_base + low_size - 1; in reserve_crashkernel_low()
1393 * reserve_crashkernel() - reserves memory for crash kernel
1426 if (ret == -ENOENT) { in reserve_crashkernel()
1437 if (ret == -ENOENT) in reserve_crashkernel()
1510 pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n", in reserve_crashkernel()
1514 crashk_res.end = crash_base + crash_size - 1; in reserve_crashkernel()
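The "- 1" on lines 1387 and 1514 is struct resource's inclusive-end convention: a [base, base + size) byte range is stored with .end pointing at its last byte, not one past it. For example:

crashk_res.start = crash_base;
crashk_res.end   = crash_base + crash_size - 1;	/* address of the last byte */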
1522 /* Depends on the linear mapping being ready */ in paging_init()
1550 * Pre-allocates page-table pages for a specific area in the kernel
1551 * page-table. Only the level which needs to be synchronized between
1552 * all page-tables is allocated because the synchronization can be
1593 * process page-tables later. in preallocate_pgd_pages_range()
1595 panic("Failed to pre-allocate %s pages for %s area\n", lvl, area); in preallocate_pgd_pages_range()