/*
 * Software MMU support
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
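
/*
 * This header is a template: each inclusion must define SHIFT (and, in the
 * including code, MMUSUFFIX) before including it, once per access size. A
 * minimal sketch of an inclusion site (illustrative only; the real includes
 * live in the core exec headers):
 *
 *   #define SHIFT 2
 *   #include "softmmu_template.h"
 *
 * With SHIFT == 2 the block above selects DATA_SIZE == 4, DATA_TYPE uint32_t
 * and SUFFIX 'l', so glue(glue(__ld, SUFFIX), MMUSUFFIX) below expands to a
 * name such as __ldl_mmu.
 */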

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
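
/* These values follow the access-type convention used by tlb_fill() and
 * do_unaligned_access() throughout this file: 0 is a data load, 1 is a
 * data store (passed as a literal in the store helpers below), and 2 is
 * an instruction fetch. */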

#if defined(CONFIG_MEMCHECK) && !defined(OUTSIDE_JIT) && !defined(SOFTMMU_CODE_ACCESS)
/*
 * Support for the memory access checker.
 * We need to instrument the __ldx/__stx_mmu routines implemented in this file
 * with callbacks into the access validation routines implemented by the
 * memory checker. Note that (at least for now) we don't instrument accesses
 * that fetch code (SOFTMMU_CODE_ACCESS controls that). Also, we don't want to
 * instrument code that is used by the emulator itself (OUTSIDE_JIT controls
 * that).
 */
#define CONFIG_MEMCHECK_MMU
#include "memcheck/memcheck_api.h"
#endif // CONFIG_MEMCHECK && !OUTSIDE_JIT && !SOFTMMU_CODE_ACCESS
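
/* As used below, memcheck_validate_ld()/memcheck_validate_st() appear to
 * return non-zero when the accessed range is under memchecker control; the
 * helpers then invalidate the matching TLB entries so that the next access
 * to that page is forced back through these slow-path routines. */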

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
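    /* The sub-page bits of the iotlb value encode the index of the
     * registered I/O handler; the page-aligned part, plus the guest
     * virtual address below, yields the address passed to the handler. */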
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

/* handle all cases except unaligned accesses that span two pages */
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;
    void *retaddr;
#ifdef CONFIG_MEMCHECK_MMU
    int invalidate_cache = 0;
#endif // CONFIG_MEMCHECK_MMU

    /* test if there is a match for unaligned or IO access */
    /* XXX: could be done more directly in the memory macros, in a
     * non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
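    /* For example, with TARGET_PAGE_BITS == 12 and CPU_TLB_SIZE == 256
     * (illustrative values), addr == 0x40001234 selects TLB entry
     * (0x40001234 >> 12) & 0xff == 0x01. */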
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* This is not an I/O access: do access verification. */
#ifdef CONFIG_MEMCHECK_MMU
            /* We only validate access to the guest's user space, for which
             * mmu_idx is set to 1. */
            if (memcheck_instrument_mmu && mmu_idx == 1 &&
                memcheck_validate_ld(addr, DATA_SIZE, (target_ulong)GETPC())) {
                /* The read crosses a page boundary, so if invalidation is
                 * required we must invalidate two TLB entries. */
                invalidate_cache = 2;
            }
#endif // CONFIG_MEMCHECK_MMU
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
#ifdef CONFIG_MEMCHECK_MMU
            /* We only validate access to the guest's user space, for which
             * mmu_idx is set to 1. */
            if (memcheck_instrument_mmu && mmu_idx == 1) {
                invalidate_cache = memcheck_validate_ld(addr, DATA_SIZE,
                                                        (target_ulong)GETPC());
            }
#endif // CONFIG_MEMCHECK_MMU
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
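            /* Fast path: the TLB entry's addend is the offset from guest
             * virtual address to host address, so addr + addend can be
             * dereferenced directly. */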
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
#ifdef CONFIG_MEMCHECK_MMU
        if (invalidate_cache) {
            /* The accessed memory is under memchecker control. We must
             * invalidate the TLB entry for the containing page(s) so that
             * the next access to them invokes _ld/_st_mmu again. */
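            /* XORing with TARGET_PAGE_MASK flips the page-number bits but
             * leaves the low flag bits intact, so the fast-path address
             * compare at the top of this helper can no longer match. */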
            env->tlb_table[mmu_idx][index].addr_read ^= TARGET_PAGE_MASK;
            env->tlb_table[mmu_idx][index].addr_write ^= TARGET_PAGE_MASK;
            if ((invalidate_cache == 2) && (index < CPU_TLB_SIZE - 1)) {
                // Read crossed a page boundary. Invalidate the second entry too.
                env->tlb_table[mmu_idx][index + 1].addr_read ^= TARGET_PAGE_MASK;
                env->tlb_table[mmu_idx][index + 1].addr_write ^= TARGET_PAGE_MASK;
            }
        }
#endif // CONFIG_MEMCHECK_MMU
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
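            /* The two aligned words are merged by shifting: e.g. on a
             * little-endian target with DATA_SIZE == 4 and (addr & 3) == 1,
             * shift == 8 and res == (res1 >> 8) | (res2 << 24), taking the
             * three high bytes of res1 and the low byte of res2. */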
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}

void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;
#ifdef CONFIG_MEMCHECK_MMU
    int invalidate_cache = 0;
#endif // CONFIG_MEMCHECK_MMU

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* This is not an I/O access: do access verification. */
#ifdef CONFIG_MEMCHECK_MMU
            /* We only validate access to the guest's user space, for which
             * mmu_idx is set to 1. */
            if (memcheck_instrument_mmu && mmu_idx == 1 &&
                memcheck_validate_st(addr, DATA_SIZE, (uint64_t)val,
                                     (target_ulong)GETPC())) {
                /* The write crosses a page boundary, so if invalidation is
                 * required we must invalidate two TLB entries. */
                invalidate_cache = 2;
            }
#endif // CONFIG_MEMCHECK_MMU
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
#ifdef CONFIG_MEMCHECK_MMU
            /* We only validate access to the guest's user space, for which
             * mmu_idx is set to 1. */
            if (memcheck_instrument_mmu && mmu_idx == 1) {
                invalidate_cache = memcheck_validate_st(addr, DATA_SIZE,
                                                        (uint64_t)val,
                                                        (target_ulong)GETPC());
            }
#endif // CONFIG_MEMCHECK_MMU
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
#ifdef CONFIG_MEMCHECK_MMU
        if (invalidate_cache) {
            /* The accessed memory is under memchecker control. We must
             * invalidate the TLB entry for the containing page(s) so that
             * the next access to them invokes _ld/_st_mmu again. */
            env->tlb_table[mmu_idx][index].addr_read ^= TARGET_PAGE_MASK;
            env->tlb_table[mmu_idx][index].addr_write ^= TARGET_PAGE_MASK;
            if ((invalidate_cache == 2) && (index < CPU_TLB_SIZE - 1)) {
                // Write crossed a page boundary. Invalidate the second entry too.
                env->tlb_table[mmu_idx][index + 1].addr_read ^= TARGET_PAGE_MASK;
                env->tlb_table[mmu_idx][index + 1].addr_write ^= TARGET_PAGE_MASK;
            }
        }
#endif // CONFIG_MEMCHECK_MMU
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache. */
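            /* E.g. on a little-endian target with DATA_SIZE == 4 the loop
             * stores (val >> 0) & 0xff at addr, (val >> 8) & 0xff at
             * addr + 1, and so on; big-endian targets reverse the shifts. */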
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ