/*
 *  linux/arch/arm/mm/nommu.c
 *
 *  ARM uCLinux supporting functions.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/mach/arch.h>

#include "mm.h"

void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Register the exception vector page.
	 * On some architectures the exception vectors live at the start
	 * of DRAM; alloc_page() breaks on that page because its address
	 * is 0, which is valid but indistinguishable from NULL.
	 */
	memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
}

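/*
 * With no MMU there are no per-bank mapping limits to enforce; simply
 * set high_memory to the first byte past the last (highest) bank.
 */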
void __init sanity_check_meminfo(void)
{
	phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]);
	high_memory = __va(end - 1) + 1;
}

/*
 * paging_init() normally sets up the page tables; with no MMU it only
 * installs the exception vectors and initialises the zone memory maps.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	early_trap_init((void *)CONFIG_VECTORS_BASE);
	bootmem_init();
}

/*
 * We don't need to do anything here for nommu machines.
 */
void setup_mm_for_reboot(void)
{
}

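/*
 * With no MMU a page has exactly one address, so there are no cache
 * aliases to worry about: flushing the kernel mapping is enough.
 */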
void flush_dcache_page(struct page *page)
{
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_dcache_page);

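/*
 * The same flush as flush_dcache_page(); called after the kernel has
 * written to a page-cache page through its kernel mapping.
 */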
void flush_kernel_dcache_page(struct page *page)
{
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

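/*
 * Copy data into a page belonging to user space (used, for example,
 * when ptrace plants a breakpoint).  If the mapping is executable,
 * the I-cache must be made coherent with the freshly written data.
 */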
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC)
		__cpuc_coherent_user_range(uaddr, uaddr + len);
}

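/*
 * Without an MMU, "remapping" an I/O page is just pointer arithmetic:
 * devices are accessed at their physical address.  PFNs at or above
 * 4GiB cannot be represented in the returned pointer, so reject them.
 */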
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
				size_t size, unsigned int mtype)
{
	if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
		return NULL;
	return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

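/*
 * Caller-tracking variant: the call-site pointer only feeds the MMU
 * build's vmalloc bookkeeping, so it is ignored here.
 */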
void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
			   size_t size, unsigned int mtype, void *caller)
{
	return __arm_ioremap_pfn(pfn, offset, size, mtype);
}

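/*
 * ioremap() collapses to an identity mapping: the memory type is
 * meaningless without an MMU to apply it.
 */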
void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
			    unsigned int mtype)
{
	return (void __iomem *)phys_addr;
}
EXPORT_SYMBOL(__arm_ioremap);

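/*
 * Runtime hook letting a platform supply its own ioremap; not
 * consulted by this nommu implementation.
 */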
void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, unsigned int, void *);

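/*
 * Forward to the identity ioremap; the call-site pointer is unused.
 */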
void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
				   unsigned int mtype, void *caller)
{
	return __arm_ioremap(phys_addr, size, mtype);
}

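/*
 * Runtime hook letting a platform supply its own iounmap; not
 * consulted by this nommu implementation.
 */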
void (*arch_iounmap)(volatile void __iomem *);

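/*
 * Nothing was mapped by __arm_ioremap(), so there is nothing to undo.
 */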
void __arm_iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(__arm_iounmap);