/*
 *  Copyright (C) 2005,2006,2007,2008,2009 Imagination Technologies
 *
 * Meta 1 MMU handling code.
 *
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/io.h>

#include <asm/mmu.h>

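/*
 * Linear address of DirectMap region 3. map_addr() below repositions this
 * window (by writing MMCU_DIRECTMAP3_ADDR) so that any physical address can
 * be reached through it; region 3 appears to be set aside for this purpose.
 */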
#define DM3_BASE (LINSYSDIRECT_BASE + (MMCU_DIRECTMAPn_ADDR_SCALE * 3))

/*
 * This contains the physical address of the top level 2k pgd table.
 */
static unsigned long mmu_base_phys;

/*
 * Given a physical address, return a mapped virtual address that can be used
 * to access that location.
 * In practice, we use the DirectMap region to make this happen.
 */
static unsigned long map_addr(unsigned long phys)
{
	/* Start out of range so the first lookup normally reprograms the window */
	static unsigned long dm_base = 0xFFFFFFFF;
	int offset;

	offset = phys - dm_base;

	/* Are we in the current map range? */
	if ((offset < 0) || (offset >= MMCU_DIRECTMAPn_ADDR_SCALE)) {
		/* Calculate new DM area */
		dm_base = phys & ~(MMCU_DIRECTMAPn_ADDR_SCALE - 1);

		/* Actually map it in! */
		metag_out32(dm_base, MMCU_DIRECTMAP3_ADDR);

		/* And calculate how far into that area our reference is */
		offset = phys - dm_base;
	}

	return DM3_BASE + offset;
}

/*
 * Return the physical address of the base of our pgd table.
 */
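/*
 * MMCU_TABLE_PHYS_ADDR gives the base of the first level tables; each
 * hardware thread's 2k pgd table follows at a 0x800 byte stride, with the
 * table for the global address space presumably placed after the four
 * per-thread ones (stride 4).
 */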
static inline unsigned long __get_mmu_base(void)
{
	unsigned long base_phys;
	unsigned int stride;

	if (is_global_space(PAGE_OFFSET))
		stride = 4;
	else
		stride = hard_processor_id();	/* [0..3] */

	base_phys = metag_in32(MMCU_TABLE_PHYS_ADDR);
	base_phys += (0x800 * stride);

	return base_phys;
}

/* Given a virtual address, return the virtual address of the relevant pgd */
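/*
 * Each first level entry is 4 bytes and covers a PGDIR_SIZE (4MB) region, so
 * the table is indexed by virt >> PGDIR_SHIFT; global addresses have bit 31
 * cleared first so that they index the global pgd table from its start.
 */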
static unsigned long pgd_entry_addr(unsigned long virt)
{
	unsigned long pgd_phys;
	unsigned long pgd_virt;

	if (!mmu_base_phys)
		mmu_base_phys = __get_mmu_base();

	/*
	 * Are we trying to map a global address?  If so, then index
	 * the global pgd table instead of our local one.
	 */
	if (is_global_space(virt)) {
		/* Scale into 2gig map */
		virt &= ~0x80000000;
	}

	/* Base of the pgd table plus our 4Meg entry, 4 bytes each */
	pgd_phys = mmu_base_phys + ((virt >> PGDIR_SHIFT) * 4);

	pgd_virt = map_addr(pgd_phys);

	return pgd_virt;
}

/* Given a virtual address, return the virtual address of the relevant pte */
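/*
 * Each second level table holds 1024 4-byte entries (hence the 0x3FF mask on
 * the page number), one pte per PAGE_SIZE page of the 4MB region.
 */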
static unsigned long pgtable_entry_addr(unsigned long virt)
{
	unsigned long pgtable_phys;
	unsigned long pgtable_virt, pte_virt;

	/* Find the physical address of the 4MB page table */
	pgtable_phys = metag_in32(pgd_entry_addr(virt)) & MMCU_ENTRY_ADDR_BITS;

	/* Map it to a virtual address */
	pgtable_virt = map_addr(pgtable_phys);

	/* And index into it for our pte */
	pte_virt = pgtable_virt + ((virt >> PAGE_SHIFT) & 0x3FF) * 4;

	return pte_virt;
}

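/*
 * Read the raw first level (pgd) entry that the MMU itself uses for vaddr,
 * as opposed to the kernel's copy in swapper_pg_dir.
 */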
unsigned long mmu_read_first_level_page(unsigned long vaddr)
{
	return metag_in32(pgd_entry_addr(vaddr));
}

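/*
 * Read the raw second level (pte) entry that the MMU uses for vaddr.
 */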
unsigned long mmu_read_second_level_page(unsigned long vaddr)
{
	return metag_in32(pgtable_entry_addr(vaddr));
}

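/*
 * Return a virtual address (via the DirectMap window) for the base of this
 * thread's pgd table; the lookup is cached after the first call.
 */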
unsigned long mmu_get_base(void)
{
	static unsigned long __base;

	/* Find the base of our MMU pgd table */
	if (!__base)
		__base = pgd_entry_addr(0);

	return __base;
}

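/*
 * One first level entry is copied per PGDIR_SIZE step, from PAGE_OFFSET up
 * to META_MEMORY_LIMIT, so swapper_pg_dir ends up mirroring the MMU's
 * existing view of the kernel address range.
 */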
void __init mmu_init(unsigned long mem_end)
{
	unsigned long entry, addr;
	pgd_t *p_swapper_pg_dir;

	/*
	 * Copy any MMU pgd entries already in the mmu page tables over to
	 * our root init process (swapper_pg_dir) map.  This map is then
	 * inherited by all other processes, which means all processes
	 * inherit a map of the kernel space.
	 */
	addr = PAGE_OFFSET;
	entry = pgd_index(PAGE_OFFSET);
	p_swapper_pg_dir = pgd_offset_k(0) + entry;

	while (addr <= META_MEMORY_LIMIT) {
		unsigned long pgd_entry;
		/* copy over the current MMU value */
		pgd_entry = mmu_read_first_level_page(addr);
		pgd_val(*p_swapper_pg_dir) = pgd_entry;

		p_swapper_pg_dir++;
		addr += PGDIR_SIZE;
		entry++;
	}
}