
Searched refs:mmu_idx (Results 1 – 9 of 9) sorted by relevance

/external/qemu/
softmmu_template.h
64 int mmu_idx,
97 int mmu_idx) in glue()
112 tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ; in glue()
119 addend = env->iotlb[mmu_idx][index]; in glue()
126 if (memcheck_instrument_mmu && mmu_idx == 1 && in glue()
137 do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr); in glue()
140 mmu_idx, retaddr); in glue()
145 if (memcheck_instrument_mmu && mmu_idx == 1) { in glue()
154 do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr); in glue()
157 addend = env->tlb_table[mmu_idx][index].addend; in glue()
[all …]
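
The hits above are the generated slow-path load helper: mmu_idx selects which per-mode TLB table to consult, a tag miss triggers tlb_fill(), and I/O pages go through env->iotlb. A minimal sketch of that lookup pattern, assuming simplified 32-bit-guest types (the struct layout and constants below are illustrative stand-ins, not the QEMU definitions):

    #include <stdint.h>

    typedef uint32_t target_ulong;

    #define NB_MMU_MODES      2    /* assumption: privileged + user, as on ARM */
    #define TLB_SIZE          256
    #define TARGET_PAGE_BITS  12
    #define TARGET_PAGE_MASK  (~(target_ulong)((1 << TARGET_PAGE_BITS) - 1))

    typedef struct {
        target_ulong addr_read;    /* page tag if readable, -1 if not */
        target_ulong addr_write;
        target_ulong addr_code;
        uintptr_t addend;          /* guest-to-host delta for RAM pages */
    } tlb_entry;

    static tlb_entry tlb_table[NB_MMU_MODES][TLB_SIZE];

    /* Provided elsewhere: walk the guest page tables, install an entry. */
    void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr);

    /* Sketch of the __ldl_mmu() slow path: index the TLB for the given
     * MMU mode, refill until the tag matches, then read through the host
     * addend.  The real template also handles I/O pages (env->iotlb),
     * unaligned accesses, and the memcheck instrumentation visible above. */
    uint32_t ldl_mmu_sketch(target_ulong addr, int mmu_idx)
    {
        unsigned index = (addr >> TARGET_PAGE_BITS) & (TLB_SIZE - 1);

        while (tlb_table[mmu_idx][index].addr_read !=
               (addr & TARGET_PAGE_MASK)) {
            tlb_fill(addr, 0 /* is_write */, mmu_idx, 0 /* retaddr */);
        }
        return *(uint32_t *)(uintptr_t)(addr + tlb_table[mmu_idx][index].addend);
    }

Later sketches in this listing reuse these definitions.
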
softmmu_defs.h
4 uint8_t REGPARM __ldb_mmu(target_ulong addr, int mmu_idx);
5 void REGPARM __stb_mmu(target_ulong addr, uint8_t val, int mmu_idx);
6 uint16_t REGPARM __ldw_mmu(target_ulong addr, int mmu_idx);
7 void REGPARM __stw_mmu(target_ulong addr, uint16_t val, int mmu_idx);
8 uint32_t REGPARM __ldl_mmu(target_ulong addr, int mmu_idx);
9 void REGPARM __stl_mmu(target_ulong addr, uint32_t val, int mmu_idx);
10 uint64_t REGPARM __ldq_mmu(target_ulong addr, int mmu_idx);
11 void REGPARM __stq_mmu(target_ulong addr, uint64_t val, int mmu_idx);
13 uint8_t REGPARM __ldb_cmmu(target_ulong addr, int mmu_idx);
14 void REGPARM __stb_cmmu(target_ulong addr, uint8_t val, int mmu_idx);
[all …]
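
softmmu_defs.h declares one out-of-line load/store helper per access size, each taking the guest address plus the mmu_idx to translate under; the _cmmu variants serve code fetches. A hedged usage sketch (read_guest_u32 is a made-up caller; CPUState and cpu_mmu_index() are taken from the headers indexed here):

    /* Load a guest word under the CPU's current translation regime.
     * cpu_mmu_index() maps CPU state to a table index; in this tree
     * user mode is index 1 (see the mmu_idx == 1 memcheck tests above). */
    uint32_t read_guest_u32(CPUState *env, target_ulong addr)
    {
        int mmu_idx = cpu_mmu_index(env);
        return __ldl_mmu(addr, mmu_idx);
    }
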
softmmu_header.h
88 int mmu_idx; in glue() local
92 mmu_idx = CPU_MMU_INDEX; in glue()
93 if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ != in glue()
95 res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx); in glue()
97 physaddr = addr + env->tlb_table[mmu_idx][page_index].addend; in glue()
109 int mmu_idx; in glue() local
113 mmu_idx = CPU_MMU_INDEX; in glue()
114 if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ != in glue()
116 res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx); in glue()
118 physaddr = addr + env->tlb_table[mmu_idx][page_index].addend; in glue()
[all …]
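
softmmu_header.h is the matching inline fast path: compare the tag in tlb_table[mmu_idx][page_index] and either read straight through addend or fall back to the out-of-line helper. A hand-expanded sketch of the 32-bit load case (SUFFIX, ADDR_READ, and CPU_MMU_INDEX are macro parameters in the real template; fixed stand-ins are assumed here, reusing the definitions from the first sketch):

    static inline uint32_t ldl_fast_sketch(CPUState *env, target_ulong addr)
    {
        int mmu_idx = cpu_mmu_index(env);       /* stands in for CPU_MMU_INDEX */
        unsigned page_index = (addr >> TARGET_PAGE_BITS) & (TLB_SIZE - 1);

        if (tlb_table[mmu_idx][page_index].addr_read !=
            (addr & TARGET_PAGE_MASK)) {
            return __ldl_mmu(addr, mmu_idx);    /* slow path: refill or I/O */
        }
        uintptr_t physaddr = addr + tlb_table[mmu_idx][page_index].addend;
        return *(uint32_t *)physaddr;
    }
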
softmmu_outside_jit.h
35 uint8_t REGPARM __ldb_outside_jit(target_ulong addr, int mmu_idx);
36 void REGPARM __stb_outside_jit(target_ulong addr, uint8_t val, int mmu_idx);
37 uint16_t REGPARM __ldw_outside_jit(target_ulong addr, int mmu_idx);
38 void REGPARM __stw_outside_jit(target_ulong addr, uint16_t val, int mmu_idx);
39 uint32_t REGPARM __ldl_outside_jit(target_ulong addr, int mmu_idx);
40 void REGPARM __stl_outside_jit(target_ulong addr, uint32_t val, int mmu_idx);
41 uint64_t REGPARM __ldq_outside_jit(target_ulong addr, int mmu_idx);
42 void REGPARM __stq_outside_jit(target_ulong addr, uint64_t val, int mmu_idx);
exec-all.h
91 int mmu_idx, int is_softmmu);
94 int mmu_idx, int is_softmmu) in tlb_set_page() argument
98 return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu); in tlb_set_page()
349 void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
387 int mmu_idx, page_index, pd; in get_phys_addr_code() local
391 mmu_idx = cpu_mmu_index(env1); in get_phys_addr_code()
392 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code != in get_phys_addr_code()
396 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK; in get_phys_addr_code()
405 + env1->tlb_table[mmu_idx][page_index].addend; in get_phys_addr_code()
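
exec-all.h threads mmu_idx through TLB installation (tlb_set_page_exec) and code fetch: get_phys_addr_code() derives the index from CPU state with cpu_mmu_index() and checks the addr_code tag. A condensed sketch against the first sketch's globals (the real function reads env1->tlb_table and fills a miss by fetching one byte through the code-access path; ldub_code_stub is a hypothetical stand-in for that fill):

    uintptr_t get_phys_addr_code_sketch(CPUState *env1, target_ulong addr)
    {
        int mmu_idx = cpu_mmu_index(env1);
        unsigned page_index = (addr >> TARGET_PAGE_BITS) & (TLB_SIZE - 1);

        if (tlb_table[mmu_idx][page_index].addr_code !=
            (addr & TARGET_PAGE_MASK)) {
            ldub_code_stub(addr);    /* force a TLB fill for the code page */
        }
        return addr + tlb_table[mmu_idx][page_index].addend;
    }
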
exec.c
1782 int mmu_idx; in tlb_flush() local
1783 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { in tlb_flush()
1784 env->tlb_table[mmu_idx][i].addr_read = -1; in tlb_flush()
1785 env->tlb_table[mmu_idx][i].addr_write = -1; in tlb_flush()
1786 env->tlb_table[mmu_idx][i].addr_code = -1; in tlb_flush()
1817 int mmu_idx; in tlb_flush_page() local
1828 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) in tlb_flush_page()
1829 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr); in tlb_flush_page()
1895 int mmu_idx; in cpu_physical_memory_reset_dirty() local
1896 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { in cpu_physical_memory_reset_dirty()
[all …]
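
The flush paths in exec.c loop mmu_idx over all NB_MMU_MODES so invalidation reaches every mode's table, not just the current one. A sketch of the tlb_flush() loop shown above, against the entry layout from the first sketch:

    /* Poison every entry in every MMU mode.  -1 can never match a
     * page-aligned tag, so the next access in any mode misses and
     * takes the slow path to refill. */
    void tlb_flush_sketch(void)
    {
        for (int mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for (int i = 0; i < TLB_SIZE; i++) {
                tlb_table[mmu_idx][i].addr_read  = (target_ulong)-1;
                tlb_table[mmu_idx][i].addr_write = (target_ulong)-1;
                tlb_table[mmu_idx][i].addr_code  = (target_ulong)-1;
            }
        }
    }
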
/external/qemu/target-arm/
op_helper.c
87 static void do_unaligned_access (target_ulong addr, int is_write, int mmu_idx, void *retaddr) in do_unaligned_access() argument
90 if (mmu_idx) in do_unaligned_access()
104 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr) in tlb_fill() argument
115 ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx, 1); in tlb_fill()
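
On ARM, tlb_fill() is what the softmmu slow path calls on a miss: it forwards to cpu_arm_handle_mmu_fault() and, if the walk faults, raises the guest exception; note also the mmu_idx test in do_unaligned_access(), where a non-zero index marks a user-mode access. A sketch of that contract (exception delivery reduced to a hypothetical stub; real QEMU first uses retaddr to restore precise CPU state, and uses a global env rather than a parameter):

    void tlb_fill_sketch(CPUState *env, target_ulong addr, int is_write,
                         int mmu_idx, void *retaddr)
    {
        int ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx,
                                           1 /* is_softmmu */);
        if (ret) {
            (void)retaddr;                  /* real code: restore CPU state */
            raise_mmu_exception_stub(env);  /* stands in for cpu_loop_exit() */
        }
    }
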
cpu.h
223 int mmu_idx, int is_softmmu);
helper.c
461 int mmu_idx, int is_softmmu) in cpu_arm_handle_mmu_fault() argument
1215 int access_type, int mmu_idx, int is_softmmu) in cpu_arm_handle_mmu_fault() argument
1221 is_user = mmu_idx == MMU_USER_IDX; in cpu_arm_handle_mmu_fault()
1227 return tlb_set_page (env, address, phys_addr, prot, mmu_idx, in cpu_arm_handle_mmu_fault()
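
cpu_arm_handle_mmu_fault() is where mmu_idx becomes a privilege decision: is_user = (mmu_idx == MMU_USER_IDX) steers the permission checks during the translation-table walk, and a successful walk ends in tlb_set_page() with the same mmu_idx so the entry lands in the matching table. A reduced sketch of that flow (get_phys_addr_stub is a hypothetical stand-in for the real table walk):

    int handle_mmu_fault_sketch(CPUState *env, target_ulong address,
                                int access_type, int mmu_idx, int is_softmmu)
    {
        int is_user = (mmu_idx == MMU_USER_IDX);
        uint32_t phys_addr;
        int prot;

        int ret = get_phys_addr_stub(env, address, access_type, is_user,
                                     &phys_addr, &prot);
        if (ret == 0) {
            /* Install the translation in the table picked by mmu_idx. */
            return tlb_set_page(env, address & TARGET_PAGE_MASK,
                                phys_addr & TARGET_PAGE_MASK,
                                prot, mmu_idx, is_softmmu);
        }
        /* Real code latches fault status/address before returning ret
         * to tlb_fill(), which then raises the guest exception. */
        return ret;
    }
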