Lines matching +full:vm +full:- +full:map
4 * Copyright (C) 1995-2005 Russell King
37 #include <asm/mach/map.h>
47 * zero-initialized data and COW.
53 * The pmd table for the upper-most set of pages.
161 int i, selected = -1; in early_cachepolicy()
172 if (selected == -1) in early_cachepolicy()
259 [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
461 pr_warn("Forcing write-allocate cache policy for SMP\n"); in build_mem_type_table()
472 * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those in build_mem_type_table()
484 * "update-able on write" bit on ARM610). However, Xscale and in build_mem_type_table()
507 * Mark device regions on ARMv6+ as execute-never in build_mem_type_table()
521 * - shared device is SXCB=1100 in build_mem_type_table()
522 * - nonshared device is SXCB=0100 in build_mem_type_table()
523 * - write combine device mem is SXCB=0001 in build_mem_type_table()
532 * - shared device is TEXCB=00101 in build_mem_type_table()
533 * - nonshared device is TEXCB=01000 in build_mem_type_table()
534 * - write combine device mem is TEXCB=00100 in build_mem_type_table()
543 * - shared device is TEXCB=00001 in build_mem_type_table()
544 * - nonshared device is TEXCB=01000 in build_mem_type_table()
545 * - write combine device mem is TEXCB=00100 in build_mem_type_table()
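These encodings land in the hardware section descriptors through the PMD_SECT_* helpers from arch/arm/include/asm/pgtable-2level-hwdef.h: TEX occupies bits [14:12], C is bit 3, B is bit 2. A minimal sketch of the two non-shared cases quoted above (the exact statements in build_mem_type_table() vary by kernel version):

	/* non-shared device, TEXCB=01000 */
	mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
	/* write-combining device memory, TEXCB=00100 */
	mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);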
560 * Now deal with the memory-type mappings in build_mem_type_table()
563 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; in build_mem_type_table()
564 s2_pgprot = cp->pte_s2; in build_mem_type_table()
572 * r/o, kernel r/w to map the vectors page. in build_mem_type_table()
579 * in the Short-descriptor translation table format descriptors. in build_mem_type_table()
626 * Non-cacheable Normal - intended for memory areas that must in build_mem_type_table()
631 /* Non-cacheable Normal is XCB = 001 */ in build_mem_type_table()
635 /* For both ARMv6 and non-TEX-remapping ARMv7 */ in build_mem_type_table()
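The same helpers express the Normal-memory encodings named in the two comments above; a sketch, assuming (as in the surrounding source) that the target type is MT_MEMORY_RWX_NONCACHED:

	/* TEX remap in effect: XCB = 001 is just the bufferable bit */
	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERED;
	/* ARMv6 / non-TEX-remapping ARMv7: TEXCB = 00100 */
	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_TEX(1);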
678 mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; in build_mem_type_table()
680 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; in build_mem_type_table()
684 mem_types[MT_ROM].prot_sect |= cp->pmd; in build_mem_type_table()
686 switch (cp->pmd) { in build_mem_type_table()
696 ecc_mask ? "ECC enabled, " : "", cp->policy); in build_mem_type_table()
700 if (t->prot_l1) in build_mem_type_table()
701 t->prot_l1 |= PMD_DOMAIN(t->domain); in build_mem_type_table()
702 if (t->prot_sect) in build_mem_type_table()
703 t->prot_sect |= PMD_DOMAIN(t->domain); in build_mem_type_table()
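PMD_DOMAIN() in the loop above packs the domain number into bits [8:5] of a first-level descriptor; its definition in pgtable-2level-hwdef.h is simply:

	#define PMD_DOMAIN(x)	(_AT(pmdval_t, (x)) << 5)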
713 else if (file->f_flags & O_SYNC) in phys_mem_access_prot()
767 pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc); in alloc_init_pte()
769 set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), in alloc_init_pte()
789 * (See arch/arm/include/asm/pgtable-2level.h) in __map_init_section()
795 *pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0)); in __map_init_section()
812 * With LPAE, we must loop over to map in alloc_init_pmd()
818 * Try a section mapping - addr, next and phys must all be in alloc_init_pmd()
821 if (type->prot_sect && in alloc_init_pmd()
829 phys += next - addr; in alloc_init_pmd()
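The condition truncated at the "if (type->prot_sect &&" match above requires the virtual range and the physical base to all be section-aligned before a section mapping is attempted; reconstructed from the surrounding source (approximate, version-dependent):

	if (type->prot_sect &&
	    ((addr | next | phys) & ~SECTION_MASK) == 0) {
		__map_init_section(pmd, addr, next, phys, type, ng);
	} else {
		alloc_init_pte(pmd, addr, next,
			       __phys_to_pfn(phys), type, alloc, ng);
	}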
845 phys += next - addr; in alloc_init_pud()
859 addr = md->virtual; in create_36bit_mapping()
860 phys = __pfn_to_phys(md->pfn); in create_36bit_mapping()
861 length = PAGE_ALIGN(md->length); in create_36bit_mapping()
865 (long long)__pfn_to_phys((u64)md->pfn), addr); in create_36bit_mapping()
875 if (type->domain) { in create_36bit_mapping()
877 (long long)__pfn_to_phys((u64)md->pfn), addr); in create_36bit_mapping()
881 if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) { in create_36bit_mapping()
883 (long long)__pfn_to_phys((u64)md->pfn), addr); in create_36bit_mapping()
891 phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20); in create_36bit_mapping()
901 *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER | in create_36bit_mapping()
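A supersection covers 16MiB with a single TLB entry, but the architecture requires the descriptor to be replicated into all 16 consecutive first-level slots it spans, so the truncated write above sits inside a loop of roughly this shape (sketch reconstructed from the surrounding source):

	for (i = 0; i < 16; i++)
		*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER |
			       (ng ? PMD_SECT_nG : 0));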
920 type = &mem_types[md->type]; in __create_mapping()
924 * Catch 36-bit addresses in __create_mapping()
926 if (md->pfn >= 0x100000) { in __create_mapping()
932 addr = md->virtual & PAGE_MASK; in __create_mapping()
933 phys = __pfn_to_phys(md->pfn); in __create_mapping()
934 length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); in __create_mapping()
936 if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) { in __create_mapping()
937 pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n", in __create_mapping()
938 (long long)__pfn_to_phys(md->pfn), addr); in __create_mapping()
949 phys += next - addr; in __create_mapping()
963 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { in create_mapping()
965 (long long)__pfn_to_phys((u64)md->pfn), md->virtual); in create_mapping()
969 if ((md->type == MT_DEVICE || md->type == MT_ROM) && in create_mapping()
970 md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START && in create_mapping()
971 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { in create_mapping()
973 (long long)__pfn_to_phys((u64)md->pfn), md->virtual); in create_mapping()
983 pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual); in create_mapping_late()
997 struct vm_struct *vm; in iotable_init() local
1005 for (md = io_desc; nr; md++, nr--) { in iotable_init()
1008 vm = &svm->vm; in iotable_init()
1009 vm->addr = (void *)(md->virtual & PAGE_MASK); in iotable_init()
1010 vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); in iotable_init()
1011 vm->phys_addr = __pfn_to_phys(md->pfn); in iotable_init()
1012 vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; in iotable_init()
1013 vm->flags |= VM_ARM_MTYPE(md->type); in iotable_init()
1014 vm->caller = iotable_init; in iotable_init()
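iotable_init() is what platform code calls from its ->map_io() hook to register static device mappings. A typical caller, sketched here with hypothetical IO_PHYS/IO_VIRT addresses standing in for real platform values:

	static struct map_desc board_io_desc[] __initdata = {
		{
			.virtual	= IO_VIRT,		  /* hypothetical VA */
			.pfn		= __phys_to_pfn(IO_PHYS), /* hypothetical PA */
			.length		= SZ_1M,
			.type		= MT_DEVICE,
		},
	};

	static void __init board_map_io(void)
	{
		iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
	}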
1022 struct vm_struct *vm; in vm_reserve_area_early() local
1027 vm = &svm->vm; in vm_reserve_area_early()
1028 vm->addr = (void *)addr; in vm_reserve_area_early()
1029 vm->size = size; in vm_reserve_area_early()
1030 vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; in vm_reserve_area_early()
1031 vm->caller = caller; in vm_reserve_area_early()
1039 * (see definition in include/asm/pgtable-2level.h). However a call to
1046 * Let's avoid the issue by inserting dummy vm entries covering the unused
1058 struct vm_struct *vm; in fill_pmd_gaps() local
1063 vm = &svm->vm; in fill_pmd_gaps()
1064 addr = (unsigned long)vm->addr; in fill_pmd_gaps()
1069 * Check if this vm starts on an odd section boundary. in fill_pmd_gaps()
1080 * Then check if this vm ends on an odd section boundary. in fill_pmd_gaps()
1084 addr += vm->size; in fill_pmd_gaps()
1091 /* no need to look at any vm entry until we hit the next PMD */ in fill_pmd_gaps()
1092 next = (addr + PMD_SIZE - 1) & PMD_MASK; in fill_pmd_gaps()
1118 struct map_desc map; in debug_ll_io_init() local
1120 debug_ll_addr(&map.pfn, &map.virtual); in debug_ll_io_init()
1121 if (!map.pfn || !map.virtual) in debug_ll_io_init()
1123 map.pfn = __phys_to_pfn(map.pfn); in debug_ll_io_init()
1124 map.virtual &= PAGE_MASK; in debug_ll_io_init()
1125 map.length = PAGE_SIZE; in debug_ll_io_init()
1126 map.type = MT_DEVICE; in debug_ll_io_init()
1127 iotable_init(&map, 1); in debug_ll_io_init()
1132 (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
1137 * area - the default is 240m.
1149 if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { in early_vmalloc()
1150 vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); in early_vmalloc()
1155 vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve); in early_vmalloc()
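Worked example with illustrative numbers (assuming VMALLOC_END = 0xff800000 and PAGE_OFFSET = 0xc0000000): booting with "vmalloc=512M" yields vmalloc_reserve = 0x20000000, well under the clamp of VMALLOC_END - (PAGE_OFFSET + SZ_32M) = 0x3d800000, so vmalloc_min becomes (void *)(0xff800000 - 0x20000000) = (void *)0xdf800000 and lowmem stays mapped up to that point.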
1171 * not affected by wrap-arounds when sizeof(phys_addr_t) == 4. in adjust_lowmem_bounds()
1176 vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET; in adjust_lowmem_bounds()
1184 if (!IS_ALIGNED(reg->base, PMD_SIZE)) { in adjust_lowmem_bounds()
1187 len = round_up(reg->base, PMD_SIZE) - reg->base; in adjust_lowmem_bounds()
1188 memblock_mark_nomap(reg->base, len); in adjust_lowmem_bounds()
1195 phys_addr_t block_start = reg->base; in adjust_lowmem_bounds()
1196 phys_addr_t block_end = reg->base + reg->size; in adjust_lowmem_bounds()
1201 if (reg->base < vmalloc_limit) { in adjust_lowmem_bounds()
1214 * Find the first non-pmd-aligned page, and point in adjust_lowmem_bounds()
1216 * limit down to be pmd-aligned, which happens at the in adjust_lowmem_bounds()
1220 * bank can be non-pmd-aligned. The only exception is in adjust_lowmem_bounds()
1221 * that the start of the bank 0 must be section- in adjust_lowmem_bounds()
1238 high_memory = __va(arm_lowmem_limit - 1) + 1; in adjust_lowmem_bounds()
1254 pr_notice("Ignoring RAM at %pa-%pa\n", in adjust_lowmem_bounds()
1258 memblock_remove(memblock_limit, end - memblock_limit); in adjust_lowmem_bounds()
1277 /* The XIP kernel is mapped in the module area -- skip over it */ in prepare_page_table()
1278 addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK; in prepare_page_table()
1321 * precious DMA-able memory... in arm_mm_memblock_reserve()
1323 memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET); in arm_mm_memblock_reserve()
1336 struct map_desc map; in devicemaps_init() local
1354 * Map the kernel if it is XIP. in devicemaps_init()
1358 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); in devicemaps_init()
1359 map.virtual = MODULES_VADDR; in devicemaps_init()
1360 map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK; in devicemaps_init()
1361 map.type = MT_ROM; in devicemaps_init()
1362 create_mapping(&map); in devicemaps_init()
1366 * Map the cache flushing regions. in devicemaps_init()
1369 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS); in devicemaps_init()
1370 map.virtual = FLUSH_BASE; in devicemaps_init()
1371 map.length = SZ_1M; in devicemaps_init()
1372 map.type = MT_CACHECLEAN; in devicemaps_init()
1373 create_mapping(&map); in devicemaps_init()
1376 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M); in devicemaps_init()
1377 map.virtual = FLUSH_BASE_MINICACHE; in devicemaps_init()
1378 map.length = SZ_1M; in devicemaps_init()
1379 map.type = MT_MINICLEAN; in devicemaps_init()
1380 create_mapping(&map); in devicemaps_init()
1384 * Create a mapping for the machine vectors at the high-vectors in devicemaps_init()
1385 * location (0xffff0000). If we aren't using high-vectors, also in devicemaps_init()
1386 * create a mapping at the low-vectors virtual address. in devicemaps_init()
1388 map.pfn = __phys_to_pfn(virt_to_phys(vectors)); in devicemaps_init()
1389 map.virtual = 0xffff0000; in devicemaps_init()
1390 map.length = PAGE_SIZE; in devicemaps_init()
1392 map.type = MT_HIGH_VECTORS; in devicemaps_init()
1394 map.type = MT_LOW_VECTORS; in devicemaps_init()
1396 create_mapping(&map); in devicemaps_init()
1399 map.virtual = 0; in devicemaps_init()
1400 map.length = PAGE_SIZE * 2; in devicemaps_init()
1401 map.type = MT_LOW_VECTORS; in devicemaps_init()
1402 create_mapping(&map); in devicemaps_init()
1405 /* Now create a kernel read-only mapping */ in devicemaps_init()
1406 map.pfn += 1; in devicemaps_init()
1407 map.virtual = 0xffff0000 + PAGE_SIZE; in devicemaps_init()
1408 map.length = PAGE_SIZE; in devicemaps_init()
1409 map.type = MT_LOW_VECTORS; in devicemaps_init()
1410 create_mapping(&map); in devicemaps_init()
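Per the surrounding source, the alternation between MT_HIGH_VECTORS and MT_LOW_VECTORS above is a CONFIG_KUSER_HELPERS ifdef for the page at 0xffff0000; the mapping at virtual address 0 is the !vectors_high() fallback; and the final mapping is the second, kernel-read-only vectors page at 0xffff0000 + PAGE_SIZE.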
1413 * Ask the machine support to map in the statically mapped devices. in devicemaps_init()
1415 if (mdesc->map_io) in devicemaps_init()
1416 mdesc->map_io(); in devicemaps_init()
1427 * any write-allocated cache lines in the vector page are written in devicemaps_init()
1454 /* Map all the lowmem memory banks. */ in map_lowmem()
1456 phys_addr_t start = reg->base; in map_lowmem()
1457 phys_addr_t end = start + reg->size; in map_lowmem()
1458 struct map_desc map; in map_lowmem() local
1469 map.pfn = __phys_to_pfn(start); in map_lowmem()
1470 map.virtual = __phys_to_virt(start); in map_lowmem()
1471 map.length = end - start; in map_lowmem()
1472 map.type = MT_MEMORY_RWX; in map_lowmem()
1474 create_mapping(&map); in map_lowmem()
1476 map.pfn = __phys_to_pfn(start); in map_lowmem()
1477 map.virtual = __phys_to_virt(start); in map_lowmem()
1478 map.length = end - start; in map_lowmem()
1479 map.type = MT_MEMORY_RW; in map_lowmem()
1481 create_mapping(&map); in map_lowmem()
1485 map.pfn = __phys_to_pfn(start); in map_lowmem()
1486 map.virtual = __phys_to_virt(start); in map_lowmem()
1487 map.length = kernel_x_start - start; in map_lowmem()
1488 map.type = MT_MEMORY_RW; in map_lowmem()
1490 create_mapping(&map); in map_lowmem()
1493 map.pfn = __phys_to_pfn(kernel_x_start); in map_lowmem()
1494 map.virtual = __phys_to_virt(kernel_x_start); in map_lowmem()
1495 map.length = kernel_x_end - kernel_x_start; in map_lowmem()
1496 map.type = MT_MEMORY_RWX; in map_lowmem()
1498 create_mapping(&map); in map_lowmem()
1501 map.pfn = __phys_to_pfn(kernel_x_end); in map_lowmem()
1502 map.virtual = __phys_to_virt(kernel_x_end); in map_lowmem()
1503 map.length = end - kernel_x_end; in map_lowmem()
1504 map.type = MT_MEMORY_RW; in map_lowmem()
1506 create_mapping(&map); in map_lowmem()
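The run of map.* assignments above is a three-way split of each lowmem bank around the kernel text; the control flow that selects them looks roughly like this (reconstructed sketch; details differ across versions):

	if (end < kernel_x_start) {
		/* bank entirely below the text: one MT_MEMORY_RWX map */
	} else if (start >= kernel_x_end) {
		/* bank entirely above the text: one MT_MEMORY_RW map */
	} else {
		/* bank overlaps the text: MT_MEMORY_RW below it,
		 * MT_MEMORY_RWX over it, MT_MEMORY_RW above it */
	}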
1529 if (!mdesc->pv_fixup) in early_paging_init()
1532 offset = mdesc->pv_fixup(); in early_paging_init()
1550 /* Re-set the phys pfn offset, and the pv offset */ in early_paging_init()
1556 (&__pv_table_end - &__pv_table_begin) << 2); in early_paging_init()
1575 * Fixup the page tables - this must be in the idmap region as in early_paging_init()
1582 /* Re-enable the caches and cacheable TLB walks */ in early_paging_init()
1593 if (!mdesc->pv_fixup) in early_paging_init()
1596 offset = mdesc->pv_fixup(); in early_paging_init()
1611 unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1); in early_fixmap_shutdown()
1619 struct map_desc map; in early_fixmap_shutdown() local
1621 map.virtual = fix_to_virt(i); in early_fixmap_shutdown()
1622 pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual); in early_fixmap_shutdown()
1629 map.pfn = pte_pfn(*pte); in early_fixmap_shutdown()
1630 map.type = MT_DEVICE; in early_fixmap_shutdown()
1631 map.length = PAGE_SIZE; in early_fixmap_shutdown()
1633 create_mapping(&map); in early_fixmap_shutdown()
1665 kimage_voffset = (unsigned long)&kimage_voffset - virt_to_idmap(&kimage_voffset); in paging_init()