/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"

#define DATA_SIZE (1 << SHIFT)
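/* SHIFT is defined by each including file to 0, 1, 2 or 3, so this
   template is instantiated once per access size (1, 2, 4 or 8 bytes). */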

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE int64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE int32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE int16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE int8_t
#else
#error unsupported data size
#endif

#define DATA_TYPE glue(u, SDATA_TYPE)
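/* Example expansion (illustrative, assuming the includer sets SHIFT == 2
   and MMUSUFFIX == _mmu): DATA_SIZE == 4, SUFFIX == l, SDATA_TYPE ==
   int32_t, DATA_TYPE == uint32_t, and the unsigned little-endian load
   helper below becomes helper_le_ldul_mmu(). */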

/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host. This is tcg_target_ulong, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE DATA_TYPE
# define USUFFIX SUFFIX
#else
# define WORD_TYPE tcg_target_ulong
# define USUFFIX glue(u, SUFFIX)
# define SSUFFIX glue(s, SUFFIX)
#endif
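
/* For example (illustrative): on a 64-bit host a 2-byte load returns its
   result zero-extended to 64 bits, so generated code never depends on how
   the host ABI promotes narrow return values. */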

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
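
/* These values follow the access-type convention used by tlb_fill() and
   do_unaligned_access(): 0 for a data load, 1 for a data store (passed as
   a literal in the store helpers below) and 2 for an instruction fetch. */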

#if DATA_SIZE == 8
# define BSWAP(X) bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X) bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X) bswap16(X)
#else
# define BSWAP(X) (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X) (X)
# define TGT_LE(X) BSWAP(X)
#else
# define TGT_BE(X) BSWAP(X)
# define TGT_LE(X) (X)
#endif
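
/* TGT_LE(X) converts between target byte order and little-endian (a no-op
   on a little-endian target), and TGT_BE(X) likewise for big-endian. Byte
   swapping is an involution, so the same macro converts in either
   direction. */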

#if DATA_SIZE == 1
# define helper_le_ld_name glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name helper_le_st_name
#else
# define helper_le_ld_name glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name helper_be_ld_name
# define helper_te_st_name helper_be_st_name
#else
# define helper_te_ld_name helper_le_ld_name
# define helper_te_st_name helper_le_st_name
#endif
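
/* For DATA_SIZE == 1 the LE/BE distinction is meaningless, which is why
   the byte case above collapses both names onto the single helper_ret_*
   entry point. The helper_te_* aliases always pick the target's native
   byte order. */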


static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    int index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
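    /* The I/O dispatch below handles at most 4 bytes per call, so an
       8-byte access is split into two 4-byte reads, combined according
       to the target's word order. */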
#if SHIFT <= 2
    val = io_mem_read(index, physaddr, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    val = (uint64_t)io_mem_read(index, physaddr, 4) << 32;
    val |= io_mem_read(index, physaddr + 4, 4);
#else
    val = io_mem_read(index, physaddr, 4);
    val |= (uint64_t)io_mem_read(index, physaddr + 4, 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return val;
}

#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
                            uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;
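    /* GETPC_ADJ shifts the raw return address back so that it points
       within the calling instruction, which lets a fault taken here be
       attributed to the correct translated block. The recursive calls
       below add GETPC_ADJ back, since the callee re-applies the
       adjustment on entry. */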

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion. */
        res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
        res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

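        /* Illustrative example: DATA_SIZE == 4 and addr % 4 == 2 gives
           shift == 16, so the low two result bytes come from the top of
           res1 (the bytes at addr and addr + 1) and the high two bytes
           from the bottom of res2 (the bytes at addr + 2 and addr + 3). */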
        /* Little-endian combine. */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
                            uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering. We should push the LE/BE request down into io. */
        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion. */
        res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
        res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

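        /* Illustrative example: DATA_SIZE == 4 and addr % 4 == 2 gives
           shift == 16, so the high two result bytes come from the bottom
           of res1 and the low two bytes from the top of res2, mirroring
           the little-endian combine above. */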
        /* Big-endian combine. */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    return helper_te_ld_name(env, addr, mmu_idx, GETRA());
}

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well. We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
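/* Example (illustrative, assuming DATA_SIZE == 2 and MMUSUFFIX == _mmu):
   helper_le_lds_name expands to helper_le_ldsw_mmu(), which casts the
   zero-extended result of helper_le_lduw_mmu() through int16_t so that
   the sign bit fills the host register. */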
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             int mmu_idx, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             int mmu_idx, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
}
# endif
#endif

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    int index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
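    /* As in io_read() above, 8-byte accesses are split into two 4-byte
       I/O writes issued in target word order. */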
#if SHIFT <= 2
    io_mem_write(index, physaddr, val, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write(index, physaddr, val >> 32, 4);
    io_mem_write(index, physaddr + 4, val, 4);
#else
    io_mem_write(index, physaddr, val, 4);
    io_mem_write(index, physaddr + 4, val >> 32, 4);
#endif
#endif /* SHIFT > 2 */
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering. We should push the LE/BE request down into io. */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache. */
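        /* Storing the highest-addressed byte first means that if the
           second page faults, the exception is raised before any byte
           of the first page has been modified. */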
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Little-endian extract. */
            uint8_t val8 = val >> (i * 8);
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion. */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again. */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access. */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering. We should push the LE/BE request down into io. */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO). */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache. */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Big-endian extract. */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion. */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page. */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */

void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         DATA_TYPE val, int mmu_idx)
{
    helper_te_st_name(env, addr, val, mmu_idx, GETRA());
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef CPU_BE
#undef CPU_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name