// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 2005,2006,2007,2008,2009 Imagination Technologies
 *
 * Meta 1 MMU handling code.
 *
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/io.h>

#include <asm/mmu.h>

#define DM3_BASE (LINSYSDIRECT_BASE + (MMCU_DIRECTMAPn_ADDR_SCALE * 3))

/*
 * This contains the physical address of the top level 2k pgd table.
 */
static unsigned long mmu_base_phys;

/*
 * Given a physical address, return a mapped virtual address that can be used
 * to access that location.
 * In practice, we use the DirectMap region to make this happen.
 */
static unsigned long map_addr(unsigned long phys)
{
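	/*
	 * Physical base of the window currently mapped through DirectMap 3.
	 * Starts at an invalid value so the first call sets up a mapping.
	 */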
	static unsigned long dm_base = 0xFFFFFFFF;
	int offset;

	offset = phys - dm_base;

	/* Are we in the current map range? */
	if ((offset < 0) || (offset >= MMCU_DIRECTMAPn_ADDR_SCALE)) {
		/* Calculate new DM area */
		dm_base = phys & ~(MMCU_DIRECTMAPn_ADDR_SCALE - 1);

		/* Actually map it in! */
		metag_out32(dm_base, MMCU_DIRECTMAP3_ADDR);

		/* And calculate how far into that area our reference is */
		offset = phys - dm_base;
	}

	return DM3_BASE + offset;
}

/*
 * Return the physical address of the base of our pgd table.
 */
static inline unsigned long __get_mmu_base(void)
{
	unsigned long base_phys;
	unsigned int stride;

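	/*
	 * Each hardware thread has its own 2k pgd table; the tables are
	 * 0x800 bytes apart, with the global table after the four
	 * per-thread ones.
	 */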
	if (is_global_space(PAGE_OFFSET))
		stride = 4;
	else
		stride = hard_processor_id();	/* [0..3] */

	base_phys = metag_in32(MMCU_TABLE_PHYS_ADDR);
	base_phys += (0x800 * stride);

	return base_phys;
}

/* Given a virtual address, return the virtual address of the relevant pgd */
static unsigned long pgd_entry_addr(unsigned long virt)
{
	unsigned long pgd_phys;
	unsigned long pgd_virt;

	if (!mmu_base_phys)
		mmu_base_phys = __get_mmu_base();

	/*
	 * Are we trying to map a global address?  If so, then index
	 * the global pgd table instead of our local one.
	 */
	if (is_global_space(virt)) {
		/* Scale into 2gig map */
		virt &= ~0x80000000;
	}

	/* Base of the pgd table plus our 4Meg entry, 4 bytes each */
	pgd_phys = mmu_base_phys + ((virt >> PGDIR_SHIFT) * 4);

	pgd_virt = map_addr(pgd_phys);

	return pgd_virt;
}

/* Given a virtual address, return the virtual address of the relevant pte */
static unsigned long pgtable_entry_addr(unsigned long virt)
{
	unsigned long pgtable_phys;
	unsigned long pgtable_virt, pte_virt;

	/* Find the physical address of the 4MB page table */
	pgtable_phys = metag_in32(pgd_entry_addr(virt)) & MMCU_ENTRY_ADDR_BITS;

	/* Map it to a virtual address */
	pgtable_virt = map_addr(pgtable_phys);

	/* And index into it for our pte */
	pte_virt = pgtable_virt + ((virt >> PAGE_SHIFT) & 0x3FF) * 4;

	return pte_virt;
}

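/* Read the hardware first level (pgd) entry that covers vaddr. */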
unsigned long mmu_read_first_level_page(unsigned long vaddr)
{
	return metag_in32(pgd_entry_addr(vaddr));
}

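/* Read the hardware second level (pte) entry that covers vaddr. */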
unsigned long mmu_read_second_level_page(unsigned long vaddr)
{
	return metag_in32(pgtable_entry_addr(vaddr));
}

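/* Return a mapped virtual address of the base of our pgd table (cached). */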
unsigned long mmu_get_base(void)
{
	static unsigned long __base;

	/* Find the base of our MMU pgd table */
	if (!__base)
		__base = pgd_entry_addr(0);

	return __base;
}

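/* Copy the MMU's existing kernel-space pgd entries into swapper_pg_dir. */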
void __init mmu_init(unsigned long mem_end)
{
	unsigned long entry, addr;
	pgd_t *p_swapper_pg_dir;

	/*
	 * Now copy any MMU pgd entries already in the mmu page tables
	 * over to our root init process (swapper_pg_dir) map.  This map is
	 * then inherited by all other processes, which means all processes
	 * inherit a map of the kernel space.
	 */
	addr = PAGE_OFFSET;
	entry = pgd_index(PAGE_OFFSET);
	p_swapper_pg_dir = pgd_offset_k(0) + entry;

	while (addr <= META_MEMORY_LIMIT) {
		unsigned long pgd_entry;
		/* copy over the current MMU value */
		pgd_entry = mmu_read_first_level_page(addr);
		pgd_val(*p_swapper_pg_dir) = pgd_entry;

		p_swapper_pg_dir++;
		addr += PGDIR_SIZE;
	}
}