/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cputlb.h"

/* statistics */
int tlb_flush_count;

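/* An entry reset to all-ones can never hit: lookups compare page-aligned
   guest addresses against these fields, and the flag bits (such as
   TLB_INVALID_MASK) left set in the -1 value make the comparison fail. */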
static const CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUArchState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for (i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
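
/* Illustrative usage sketch (not from this file): target code typically
   calls tlb_flush() when the guest MMU configuration changes in a way
   that can invalidate cached translations, e.g. in a hypothetical
   control-register write handler:

       void cpu_foo_set_mmu_base(CPUArchState *env, target_ulong value)
       {
           env->mmu_base = value;   // hypothetical per-target field
           tlb_flush(env, 1);       // drop all cached translations
       }

   Passing flush_global == 0 would be enough on architectures with global
   pages, but as the comment above notes it currently flushes everything
   either way. */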

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

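    /* The TLB is a direct-mapped table of CPU_TLB_SIZE entries per MMU
       mode, indexed by the low bits of the virtual page number, so at
       most one slot per mmu_idx can map this page. */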
    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    tb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write &= TARGET_PAGE_MASK;
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
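
/* Descriptive note: tlb_reset_dirty_range() above tags a cached RAM mapping
   with TLB_NOTDIRTY, which forces the next guest store to that page through
   the slow (I/O) path.  That path marks the page dirty again and eventually
   calls tlb_set_dirty() below, which strips the flag so later stores can use
   the fast path once more. */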

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
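    /* Worked example with hypothetical values: an existing 2MB region at
       0x40000000 (mask 0xffe00000) and a new 2MB page at 0x40600000 differ
       in bits 21-22, so the loop below widens the mask to 0xff800000 and
       the tracked region becomes the 8MB range starting at 0x40000000,
       covering both pages. */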
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    ptrdiff_t addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    hwaddr iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (ptrdiff_t)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
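
/* Illustrative caller sketch (not from this file): tlb_set_page() is
   normally invoked from a target's MMU fault handler / tlb_fill() path
   once the guest page tables have been walked, along the lines of:

       // hypothetical target code
       if (walk_page_tables(env, addr, &paddr, &prot, &page_size) == 0) {
           tlb_set_page(env, addr & TARGET_PAGE_MASK,
                        paddr & TARGET_PAGE_MASK, prot, mmu_idx, page_size);
       } else {
           raise_mmu_fault(env, addr);   // hypothetical helper
       }
 */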

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
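    /* If the cached code address for this page is not valid, touch the page
       with a code load: the resulting TLB miss goes through the target's
       tlb_fill() and installs a usable addr_code entry (or raises a guest
       fault). */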
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
#if defined(TARGET_SPARC) || defined(TARGET_MIPS)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

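/* Instantiate the software MMU code-fetch helpers: MMUSUFFIX/_cmmu and
   SOFTMMU_CODE_ACCESS select the code-access variants, and each SHIFT value
   expands exec/softmmu_template.h for one access size (1 << SHIFT bytes,
   i.e. 1, 2, 4 and 8). */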
#define MMUSUFFIX _cmmu
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"