/arch/nios2/kernel/misaligned.c
   72: unsigned int fault;  in handle_unaligned_c() (local)
   85: fault = 0;  in handle_unaligned_c()
   98: fault |= __get_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
   99: fault |= __get_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
  111: fault |= __put_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
  112: fault |= __put_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
  116: fault |= __get_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
  117: fault |= __get_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
  133: fault |= __put_user(d0, (u8 *)(addr+0));  in handle_unaligned_c()
  134: fault |= __put_user(d1, (u8 *)(addr+1));  in handle_unaligned_c()
  [all …]

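The hits above show how nios2 emulates a misaligned halfword access in software: split it into byte accesses and OR every __get_user()/__put_user() return value into one accumulator, so a single test at the end catches a fault on any byte. A minimal sketch of the load side, assuming little-endian reassembly (the function name is illustrative, not the file's):

#include <linux/uaccess.h>
#include <linux/types.h>

/*
 * Sketch: emulate a misaligned 16-bit load byte by byte.
 * Each __get_user() returns 0 or -EFAULT; OR-ing the results lets
 * us defer the error check until both bytes have been attempted.
 */
static int emulate_load16(unsigned long addr, u16 *val)
{
	unsigned int fault = 0;
	u8 d0, d1;

	fault |= __get_user(d0, (u8 __user *)(addr + 0));
	fault |= __get_user(d1, (u8 __user *)(addr + 1));
	if (fault)
		return -EFAULT;	/* caller raises the signal */

	*val = d0 | ((u16)d1 << 8);	/* little-endian: low byte first */
	return 0;
}
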
/arch/arc/kernel/unaligned.c
   50: goto fault; \
   65: goto fault; \
   92: goto fault; \
  125: goto fault; \
  159: fault: state->fault = 1;  in fixup_load()
  179: goto fault;  in fixup_store()
  191: fault: state->fault = 1;  in fixup_store()
  225: if (state.fault)  in misaligned_fixup()
  226: goto fault;  in misaligned_fixup()
  230: goto fault;  in misaligned_fixup()
  [all …]

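arc reaches the same goal through a different error path: its access macros jump to a function-local fault: label that records the failure in the emulation state, and misaligned_fixup() checks state.fault after the fixup helpers return. A condensed sketch of that shape (the struct and macro here are simplified stand-ins for the file's actual disassembly state):

#include <linux/uaccess.h>
#include <linux/types.h>

/* Simplified stand-in for the arc emulation state. */
struct fixup_state {
	unsigned long src;
	unsigned long dest;
	int fault;
};

/* On a failed user access, bail out to the function-local label. */
#define get8_unaligned(addr, val)				\
	do {							\
		if (__get_user(val, (u8 __user *)(addr)))	\
			goto fault;				\
	} while (0)

static void fixup_load(struct fixup_state *state)
{
	u8 b;

	get8_unaligned(state->src, b);
	state->dest = b;
	return;

fault:
	state->fault = 1;	/* caller: if (state.fault) goto fault; */
}
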
/arch/s390/mm/fault.c
  322: vm_fault_t fault)  in do_fault_error() (argument)
  326: switch (fault) {  in do_fault_error()
  335: si_code = (fault == VM_FAULT_BADMAP) ?  in do_fault_error()
  351: if (fault & VM_FAULT_OOM) {  in do_fault_error()
  356: } else if (fault & VM_FAULT_SIGSEGV) {  in do_fault_error()
  362: } else if (fault & VM_FAULT_SIGBUS) {  in do_fault_error()
  395: vm_fault_t fault;  in do_exception() (local)
  415: fault = VM_FAULT_BADCONTEXT;  in do_exception()
  421: fault = VM_FAULT_BADMAP;  in do_exception()
  447: fault = VM_FAULT_BADMAP;  in do_exception()
  [all …]

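s390 (and, further down, arm and unicore32) widens the generic vm_fault_t space with arch-private pseudo codes such as VM_FAULT_BADMAP, VM_FAULT_BADACCESS and VM_FAULT_BADCONTEXT, so that VMA lookup failures and handle_mm_fault() results can all funnel through one do_fault_error() dispatcher. A sketch of the idea, with illustrative bit values (the real constants and the full switch live in the arch's fault.c):

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/sched/signal.h>

/* Arch-private pseudo fault codes; the values here are illustrative,
 * chosen only to avoid the generic VM_FAULT_* bits. */
#define VM_FAULT_BADCONTEXT	((__force vm_fault_t)0x010000)
#define VM_FAULT_BADMAP		((__force vm_fault_t)0x020000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x040000)

static void do_fault_error(struct pt_regs *regs, unsigned long addr,
			   vm_fault_t fault)
{
	if (fault & (VM_FAULT_BADMAP | VM_FAULT_BADACCESS)) {
		/* no VMA at addr, or the VMA lacks the needed rights */
		int si_code = (fault & VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;

		force_sig_fault(SIGSEGV, si_code, (void __user *)addr);
	} else if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();	/* may OOM-kill us */
	} else if (fault & VM_FAULT_SIGBUS) {
		force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr);
	}
}
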
/arch/m68k/mm/fault.c
   73: vm_fault_t fault;  in do_page_fault() (local)
  138: fault = handle_mm_fault(vma, address, flags);  in do_page_fault()
  139: pr_debug("handle_mm_fault returns %x\n", fault);  in do_page_fault()
  141: if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))  in do_page_fault()
  144: if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
  145: if (fault & VM_FAULT_OOM)  in do_page_fault()
  147: else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
  149: else if (fault & VM_FAULT_SIGBUS)  in do_page_fault()
  160: if (fault & VM_FAULT_MAJOR)  in do_page_fault()
  164: if (fault & VM_FAULT_RETRY) {  in do_page_fault()

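This m68k tail after handle_mm_fault() is the skeleton that most of the fault.c entries below (parisc, hexagon, alpha, microblaze, openrisc, nios2, ia64, riscv, xtensa, mips, nds32) repeat almost verbatim: bail out if a fatal signal arrived while the fault slept, decode VM_FAULT_ERROR into the OOM/SIGSEGV/SIGBUS paths, account major versus minor faults, and retry once with FAULT_FLAG_TRIED. A schematic of that common shape for kernels of this era, before the retry loop was centralized in mm/ (the goto targets are the conventional labels, elided here):

	fault = handle_mm_fault(vma, address, flags);

	/* The fault may have slept on I/O; a fatal signal can be pending. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;

		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			/* handle_mm_fault() dropped mmap_sem for us. */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
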
/arch/parisc/mm/fault.c
  266: vm_fault_t fault = 0;  in do_page_fault() (local)
  305: fault = handle_mm_fault(vma, address, flags);  in do_page_fault()
  307: if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))  in do_page_fault()
  310: if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
  316: if (fault & VM_FAULT_OOM)  in do_page_fault()
  318: else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
  320: else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|  in do_page_fault()
  326: if (fault & VM_FAULT_MAJOR)  in do_page_fault()
  330: if (fault & VM_FAULT_RETRY) {  in do_page_fault()
  391: if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {  in do_page_fault()
  [all …]

/arch/hexagon/mm/vm_fault.c
   42: vm_fault_t fault;  in do_page_fault() (local)
   92: fault = handle_mm_fault(vma, address, flags);  in do_page_fault()
   94: if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))  in do_page_fault()
   98: if (likely(!(fault & VM_FAULT_ERROR))) {  in do_page_fault()
  100: if (fault & VM_FAULT_MAJOR)  in do_page_fault()
  104: if (fault & VM_FAULT_RETRY) {  in do_page_fault()
  121: if (fault & VM_FAULT_OOM) {  in do_page_fault()
  129: if (fault & VM_FAULT_SIGBUS) {  in do_page_fault()

/arch/x86/hyperv/nested.c
   29: goto fault;  in hyperv_flush_guest_mapping()
   40: goto fault;  in hyperv_flush_guest_mapping()
   53: fault:  in hyperv_flush_guest_mapping()
  101: goto fault;  in hyperv_flush_guest_mapping_range()
  111: goto fault;  in hyperv_flush_guest_mapping_range()
  120: goto fault;  in hyperv_flush_guest_mapping_range()
  132: fault:  in hyperv_flush_guest_mapping_range()

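In the two Hyper-V helpers, fault is not a vm_fault_t at all: it is the conventional single-exit error label, with every precondition check and hypercall failure jumping to the same cleanup point. A generic sketch of the idiom (the helper and its checks are invented for illustration; the real preconditions are in hyperv_flush_guest_mapping() itself):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>

u64 issue_hypercall(u64 as);	/* hypothetical stand-in */

/* Illustrative only: the shape of the goto-fault error path. */
static int flush_guest_mapping_sketch(u64 as)
{
	int ret = -EFAULT;
	u64 status;

	if (!as)
		goto fault;	/* precondition not met */

	status = issue_hypercall(as);
	if (status)
		goto fault;	/* hypercall reported an error */

	ret = 0;
fault:
	if (ret)
		pr_warn("guest mapping flush failed: %d\n", ret);
	return ret;
}
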
/arch/mips/kernel/unaligned.c
   944: goto fault;  in emulate_load_store_insn()
   953: goto fault;  in emulate_load_store_insn()
   979: goto fault;  in emulate_load_store_insn()
   992: goto fault;  in emulate_load_store_insn()
  1005: goto fault;  in emulate_load_store_insn()
  1020: goto fault;  in emulate_load_store_insn()
  1033: goto fault;  in emulate_load_store_insn()
  1058: goto fault;  in emulate_load_store_insn()
  1077: goto fault;  in emulate_load_store_insn()
  1096: goto fault;  in emulate_load_store_insn()
  [all …]

/arch/alpha/mm/fault.c
   91: vm_fault_t fault;  in do_page_fault() (local)
  151: fault = handle_mm_fault(vma, address, flags);  in do_page_fault()
  153: if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))  in do_page_fault()
  156: if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
  157: if (fault & VM_FAULT_OOM)  in do_page_fault()
  159: else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
  161: else if (fault & VM_FAULT_SIGBUS)  in do_page_fault()
  167: if (fault & VM_FAULT_MAJOR)  in do_page_fault()
  171: if (fault & VM_FAULT_RETRY) {  in do_page_fault()

/arch/microblaze/mm/fault.c
   93: vm_fault_t fault;  in do_page_fault() (local)
  218: fault = handle_mm_fault(vma, address, flags);  in do_page_fault()
  220: if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))  in do_page_fault()
  223: if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
  224: if (fault & VM_FAULT_OOM)  in do_page_fault()
  226: else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
  228: else if (fault & VM_FAULT_SIGBUS)  in do_page_fault()
  234: if (unlikely(fault & VM_FAULT_MAJOR))  in do_page_fault()
  238: if (fault & VM_FAULT_RETRY) {  in do_page_fault()

/arch/unicore32/mm/fault.c
  166: vm_fault_t fault;  in __do_pf() (local)
  169: fault = VM_FAULT_BADMAP;  in __do_pf()
  181: fault = VM_FAULT_BADACCESS;  in __do_pf()
  189: fault = handle_mm_fault(vma, addr & PAGE_MASK, flags);  in __do_pf()
  190: return fault;  in __do_pf()
  196: return fault;  in __do_pf()
  204: vm_fault_t fault;  in do_pf() (local)
  247: fault = __do_pf(mm, addr, fsr, flags, tsk);  in do_pf()
  253: if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))  in do_pf()
  256: if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {  in do_pf()
  [all …]

/arch/powerpc/mm/fault.c
  143: vm_fault_t fault)  in do_sigbus() (argument)
  150: if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {  in do_sigbus()
  156: if (fault & VM_FAULT_HWPOISON_LARGE)  in do_sigbus()
  157: lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));  in do_sigbus()
  158: if (fault & VM_FAULT_HWPOISON)  in do_sigbus()
  171: vm_fault_t fault)  in mm_fault_error() (argument)
  181: if (fault & VM_FAULT_OOM) {  in mm_fault_error()
  190: if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|  in mm_fault_error()
  192: return do_sigbus(regs, addr, fault);  in mm_fault_error()
  193: else if (fault & VM_FAULT_SIGSEGV)  in mm_fault_error()
  [all …]

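powerpc's do_sigbus() adds the memory-failure refinement on top of the plain SIGBUS path: hardware-poisoned pages are reported as BUS_MCEERR_AR together with the log2 size of the poisoned region, which for huge pages is recovered from the hstate index packed into the fault code. A sketch of that fragment, following the lines quoted above (the signal API shown matches kernels of this era):

	unsigned int lsb = 0;	/* log2 of the poisoned granule size */

	if (fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
		pr_err("MCE: Killing %s:%d due to hardware memory corruption\n",
		       current->comm, current->pid);

		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;

		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
		return 0;
	}

	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
	return 0;
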
/arch/openrisc/mm/fault.c
   52: vm_fault_t fault;  in do_page_fault() (local)
  162: fault = handle_mm_fault(vma, address, flags);  in do_page_fault()
  164: if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))  in do_page_fault()
  167: if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
  168: if (fault & VM_FAULT_OOM)  in do_page_fault()
  170: else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
  172: else if (fault & VM_FAULT_SIGBUS)  in do_page_fault()
  179: if (fault & VM_FAULT_MAJOR)  in do_page_fault()
  183: if (fault & VM_FAULT_RETRY) {  in do_page_fault()

/arch/nios2/mm/fault.c
   49: vm_fault_t fault;  in do_page_fault() (local)
  134: fault = handle_mm_fault(vma, address, flags);  in do_page_fault()
  136: if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))  in do_page_fault()
  139: if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
  140: if (fault & VM_FAULT_OOM)  in do_page_fault()
  142: else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
  144: else if (fault & VM_FAULT_SIGBUS)  in do_page_fault()
  155: if (fault & VM_FAULT_MAJOR)  in do_page_fault()
  159: if (fault & VM_FAULT_RETRY) {  in do_page_fault()

/arch/ia64/mm/fault.c
   67: vm_fault_t fault;  in ia64_do_page_fault() (local)
  142: fault = handle_mm_fault(vma, address, flags);  in ia64_do_page_fault()
  144: if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))  in ia64_do_page_fault()
  147: if (unlikely(fault & VM_FAULT_ERROR)) {  in ia64_do_page_fault()
  153: if (fault & VM_FAULT_OOM) {  in ia64_do_page_fault()
  155: } else if (fault & VM_FAULT_SIGSEGV) {  in ia64_do_page_fault()
  157: } else if (fault & VM_FAULT_SIGBUS) {  in ia64_do_page_fault()
  165: if (fault & VM_FAULT_MAJOR)  in ia64_do_page_fault()
  169: if (fault & VM_FAULT_RETRY) {  in ia64_do_page_fault()

/arch/arc/mm/fault.c
   68: vm_fault_t fault = VM_FAULT_SIGSEGV; /* handle_mm_fault() output */  in do_page_fault() (local)
  128: fault = handle_mm_fault(vma, address, flags);  in do_page_fault()
  133: if (unlikely(fault & VM_FAULT_RETRY)) {  in do_page_fault()
  164: if (likely(!(fault & VM_FAULT_ERROR))) {  in do_page_fault()
  165: if (fault & VM_FAULT_MAJOR) {  in do_page_fault()
  182: if (fault & VM_FAULT_OOM) {  in do_page_fault()
  187: if (fault & VM_FAULT_SIGBUS) {  in do_page_fault()

/arch/csky/mm/fault.c
   53: int fault;  in do_page_fault() (local)
  149: fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0);  in do_page_fault()
  150: if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
  151: if (fault & VM_FAULT_OOM)  in do_page_fault()
  153: else if (fault & VM_FAULT_SIGBUS)  in do_page_fault()
  155: else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
  159: if (fault & VM_FAULT_MAJOR) {  in do_page_fault()

/arch/riscv/mm/fault.c
   35: vm_fault_t fault;  in do_page_fault() (local)
  113: fault = handle_mm_fault(vma, addr, flags);  in do_page_fault()
  120: if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(tsk))  in do_page_fault()
  123: if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
  124: if (fault & VM_FAULT_OOM)  in do_page_fault()
  126: else if (fault & VM_FAULT_SIGBUS)  in do_page_fault()
  137: if (fault & VM_FAULT_MAJOR) {  in do_page_fault()
  146: if (fault & VM_FAULT_RETRY) {  in do_page_fault()

/arch/xtensa/mm/fault.c
   45: vm_fault_t fault;  in do_page_fault() (local)
  111: fault = handle_mm_fault(vma, address, flags);  in do_page_fault()
  113: if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))  in do_page_fault()
  116: if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
  117: if (fault & VM_FAULT_OOM)  in do_page_fault()
  119: else if (fault & VM_FAULT_SIGSEGV)  in do_page_fault()
  121: else if (fault & VM_FAULT_SIGBUS)  in do_page_fault()
  126: if (fault & VM_FAULT_MAJOR)  in do_page_fault()
  130: if (fault & VM_FAULT_RETRY) {  in do_page_fault()

/arch/sh/mm/fault.c
  299: unsigned long address, vm_fault_t fault)  in mm_fault_error() (argument)
  306: if (!(fault & VM_FAULT_RETRY))  in mm_fault_error()
  313: if (!(fault & VM_FAULT_ERROR))  in mm_fault_error()
  316: if (fault & VM_FAULT_OOM) {  in mm_fault_error()
  332: if (fault & VM_FAULT_SIGBUS)  in mm_fault_error()
  334: else if (fault & VM_FAULT_SIGSEGV)  in mm_fault_error()
  382: vm_fault_t fault;  in do_page_fault() (local)
  467: fault = handle_mm_fault(vma, address, flags);  in do_page_fault()
  469: if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))  in do_page_fault()
  470: if (mm_fault_error(regs, error_code, address, fault))  in do_page_fault()
  [all …]

/arch/mips/mm/fault.c
   46: vm_fault_t fault;  in __do_page_fault() (local)
  155: fault = handle_mm_fault(vma, address, flags);  in __do_page_fault()
  157: if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))  in __do_page_fault()
  161: if (unlikely(fault & VM_FAULT_ERROR)) {  in __do_page_fault()
  162: if (fault & VM_FAULT_OOM)  in __do_page_fault()
  164: else if (fault & VM_FAULT_SIGSEGV)  in __do_page_fault()
  166: else if (fault & VM_FAULT_SIGBUS)  in __do_page_fault()
  171: if (fault & VM_FAULT_MAJOR) {  in __do_page_fault()
  180: if (fault & VM_FAULT_RETRY) {  in __do_page_fault()

/arch/arm/mm/fault.c
  207: vm_fault_t fault;  in __do_page_fault() (local)
  210: fault = VM_FAULT_BADMAP;  in __do_page_fault()
  222: fault = VM_FAULT_BADACCESS;  in __do_page_fault()
  234: return fault;  in __do_page_fault()
  243: vm_fault_t fault;  in do_page_fault() (local)
  292: fault = __do_page_fault(mm, addr, fsr, flags, tsk);  in do_page_fault()
  298: if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {  in do_page_fault()
  311: if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {  in do_page_fault()
  312: if (fault & VM_FAULT_MAJOR) {  in do_page_fault()
  321: if (fault & VM_FAULT_RETRY) {  in do_page_fault()
  [all …]

/arch/arm/mm/alignment.c
  222: goto fault; \
  243: goto fault; \
  275: goto fault; \
  317: goto fault; \
  384: fault:  in do_alignment_ldrhstrh()
  446: fault:  in do_alignment_ldrdstrd()
  482: fault:  in do_alignment_ldrstr()
  582: fault:  in do_alignment_ldmstm()
  772: int fault;  in alignment_get_arm() (local)
  775: fault = get_user(instr, ip);  in alignment_get_arm()
  [all …]

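Besides the goto-fault macros it shares with the arc and mips emulators, alignment.c also has to read the faulting instruction itself from user memory, as alignment_get_arm() does above; that get_user() can legitimately fail if the text page was unmapped underneath the trap. A sketch of the fetch step (simplified: the real function also converts via __mem_to_opcode_arm() for BE8 kernels):

#include <linux/uaccess.h>
#include <linux/types.h>

/* Fetch the 32-bit ARM instruction that caused the alignment trap. */
static int fetch_faulting_instr(u32 __user *ip, u32 *instr)
{
	int fault;

	fault = get_user(*instr, ip);	/* user text page may be gone */
	if (fault)
		return fault;		/* caller treats this as a bad PC */

	return 0;
}
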
/arch/sparc/kernel/wuf.S
  258: LEON_PI( lda [%l4] ASI_LEON_MMUREGS, %g0)  ! clear fault status
  259: SUN_PI_( lda [%l4] ASI_M_MMUREGS, %g0)  ! clear fault status
  268: or %l5, 0x2, %l5  ! turn on no-fault bit
  284: andn %twin_tmp1, 0x2, %twin_tmp1  ! clear no-fault bit
  289: LEON_PI(lda [%twin_tmp2] ASI_LEON_MMUREGS, %g0)  ! read fault address
  290: SUN_PI_(lda [%twin_tmp2] ASI_M_MMUREGS, %g0)  ! read fault address
  293: LEON_PI(lda [%twin_tmp2] ASI_LEON_MMUREGS, %twin_tmp2)  ! read fault status
  294: SUN_PI_(lda [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2)  ! read fault status
  295: andcc %twin_tmp2, 0x2, %g0  ! did fault occur?

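The sparc window-underflow code does in assembly what fault-suppressing accessors do for C code: set the SRMMU no-fault bit, attempt the access, then consult the fault status register to learn whether it would have trapped. The portable analogue of "access, then check, without trapping" is probe_kernel_read() (renamed copy_from_kernel_nofault() in later kernels); a minimal usage sketch of that concept, not of the sparc mechanism itself:

#include <linux/uaccess.h>
#include <linux/types.h>

/* Probe a possibly-unmapped address without risking an oops. */
static bool stack_word_readable(const unsigned long *sp)
{
	unsigned long word;

	/* Returns -EFAULT instead of faulting if sp is bad. */
	return probe_kernel_read(&word, sp, sizeof(word)) == 0;
}
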
/arch/nds32/mm/fault.c
   77: vm_fault_t fault;  in do_page_fault() (local)
  206: fault = handle_mm_fault(vma, addr, flags);  in do_page_fault()
  213: if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {  in do_page_fault()
  219: if (unlikely(fault & VM_FAULT_ERROR)) {  in do_page_fault()
  220: if (fault & VM_FAULT_OOM)  in do_page_fault()
  222: else if (fault & VM_FAULT_SIGBUS)  in do_page_fault()
  235: if (fault & VM_FAULT_MAJOR) {  in do_page_fault()
  244: if (fault & VM_FAULT_RETRY) {  in do_page_fault()