/arch/s390/kernel/
nospec-branch.c
    102  u8 *instr, *thunk, *br;    in __nospec_revert() local
    118  br = thunk + (*(int *)(thunk + 2)) * 2;    in __nospec_revert()
    124  br = thunk + (*(int *)(thunk + 2)) * 2;    in __nospec_revert()
    128  if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)    in __nospec_revert()
    134  insnbuf[0] = br[0];    in __nospec_revert()
    135  insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);    in __nospec_revert()
    136  if (br[0] == 0x47) {    in __nospec_revert()
    138  insnbuf[2] = br[2];    in __nospec_revert()
    139  insnbuf[3] = br[3];    in __nospec_revert()
    145  insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);    in __nospec_revert()
    [all …]
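
The s390 hits are in the spectre mitigation code: __nospec_revert() recovers a thunk's branch target from the signed 32-bit halfword displacement stored at byte offset 2 of the branch instruction (br = thunk + (*(int *)(thunk + 2)) * 2), then checks the branch opcode bytes found there. A minimal, self-contained sketch of just that displacement decoding, with a hypothetical helper name (decode_branch_target is not a kernel function) and host byte order assumed:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Sketch only: s390 relative branches (e.g. brcl/brasl) encode their
     * target as a signed 32-bit count of halfwords, stored at byte 2 of
     * the instruction.  __nospec_revert() recovers the thunk target the
     * same way: thunk + (*(int *)(thunk + 2)) * 2.
     */
    static uint8_t *decode_branch_target(uint8_t *insn)
    {
        int32_t halfwords;

        memcpy(&halfwords, insn + 2, sizeof(halfwords)); /* displacement field */
        return insn + (int64_t)halfwords * 2;            /* halfwords -> bytes */
    }

    int main(void)
    {
        uint8_t insn[6] = { 0 };
        int32_t disp = 16;                      /* 16 halfwords = 32 bytes forward */

        memcpy(insn + 2, &disp, sizeof(disp));  /* store in host byte order */
        printf("target = insn + %td bytes\n",
               decode_branch_target(insn) - insn);
        return 0;
    }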

/arch/ia64/lib/
copy_user.S
    90   (p8) br.ret.spnt.many rp // empty mempcy()
    108  (p10) br.cond.dptk .long_copy_user
    117  br.ctop.dptk.few 1b
    122  br.ret.sptk.many rp // end of short memcpy
    158  (p15) br.cond.spnt 1f
    168  br.cond.spnt .word_copy_user
    199  br.ctop.dptk.few 2b
    205  (p9) br.cond.spnt 4f // if (16 > len1) skip 8-byte copy
    237  (pred) br.cond.spnt .copy_user_bit##shift
    245  br.ctop.dptk 1b; \
    [all …]

clear_user.S
    67   (p6) br.ret.spnt.many rp
    71   (p6) br.cond.dptk .long_do_clear
    84   br.cloop.dptk 1b
    92   br.ret.sptk.many rp // end of short clear_user
    124  (p6) br.cond.dpnt .dotail // we have less than 16 bytes left
    153  br.cloop.dptk 2b
    183  br.ret.sptk.many rp // end of most likely path
    210  br.ret.sptk.many rp

memcpy_mck.S
    80   br.cond.sptk .common_code
    105  (p15) br.cond.dpnt .memcpy_short
    106  (p13) br.cond.dpnt .align_dest
    107  (p14) br.cond.dpnt .unaligned_src
    130  (p6) br.cond.dpnt .long_copy
    136  br.cloop.dptk.few .prefetch
    153  (p10) br.dpnt.few .aligned_src_tail
    166  br.ctop.dptk.few 1b
    186  br.dptk.many .memcpy_short
    216  br.ctop.sptk .prefetch_loop
    [all …]

memset.S
    78   (p_scr) br.ret.dpnt.many rp // return immediately if count = 0
    85   (p_scr) br.cond.dptk.many .move_bytes_unaligned // go move just a few (M_B_U)
    121  (p_scr) br.cond.dpnt.many .fraction_of_line // go move just a few
    127  (p_zr) br.cond.dptk.many .l1b // Jump to use stf.spill
    152  br.cloop.dptk.few .pref_l1a
    191  br.cloop.dptk.few .l1ax
    195  (p_scr) br.cond.dpnt.many .fraction_of_line // Branch no. 2
    196  br.cond.dpnt.many .move_bytes_from_alignment // Branch no. 3
    222  br.cloop.dptk.few .pref_l1b
    245  br.cloop.dptk.few .l1bx
    [all …]

strlen.S
    121  (p6) br.wtop.dptk 1b // loop until p6 == 0
    132  (p6) br.cond.spnt .recover // jump to recovery if val1 is NaT
    139  (p7) br.cond.spnt .recover // jump to recovery if val2 is NaT
    151  br.ret.sptk.many rp // end of normal execution
    185  (p6) br.wtop.dptk 2b // loop until p6 == 0
    193  br.ret.sptk.many rp // end of successful recovery code

memcpy.S
    65   (p6) br.ret.spnt.many rp // zero length, return immediately
    84   (p7) br.cond.spnt.few .memcpy_short
    85   (p6) br.cond.spnt.few .memcpy_long
    105  br.ctop.dptk.few 1b
    111  br.ret.sptk.many rp
    149  br.ctop.dptk.few 1b
    154  br.ret.sptk.many rp
    256  br.sptk.few b6
    272  br.ret.sptk.many rp
    286  br.ctop.dptk.few 1b; \
    [all …]

strnlen_user.S
    38   (p6) br.cond.dpnt .Lexit
    39   br.cloop.dptk.few .Loop1
    46   br.ret.sptk.many rp

strncpy_from_user.S
    30   (p6) br.ret.spnt.many rp
    40   (p8) br.cond.dpnt.few .Loop1
    45   br.ret.sptk.many rp

flush.S
    56   br.cloop.sptk.few .Loop
    63   br.ret.sptk.many rp
    112  br.cloop.sptk.few .Loop_fc
    119  br.ret.sptk.many rp

xor.S
    41   br.ctop.dptk.few 0b
    45   br.ret.sptk.few rp
    82   br.ctop.dptk.few 0b
    86   br.ret.sptk.few rp
    126  br.ctop.dptk.few 0b
    130  br.ret.sptk.few rp
    175  br.ctop.dptk.few 0b
    179  br.ret.sptk.few rp

ip_fast_csum.S
    46   (p6) br.spnt .generic
    77   br.ret.sptk.many b0
    90   br.call.sptk.many b0=do_csum
    95   br.ret.sptk.many b0
    146  br.ret.sptk.many b0

/arch/ia64/kernel/
pal.S
    38   br.ret.sptk.many rp
    48   br.cond.sptk.many rp
    83   br.cond.sptk.many b7
    90   br.ret.sptk.many b0
    120  br.call.sptk.many rp=b7 // now make the call
    126  br.ret.sptk.many b0
    180  br.call.sptk.many rp=ia64_switch_mode_phys
    184  br.cond.sptk.many b7
    190  br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
    198  br.ret.sptk.many b0
    [all …]

entry.S
    72   br.call.sptk.many rp=sys_execve
    81   (p7) br.ret.sptk.many rp
    101  br.ret.sptk.many rp
    127  br.call.sptk.many rp=do_fork
    132  br.ret.sptk.many rp
    158  br.call.sptk.many rp=do_fork
    163  br.ret.sptk.many rp
    192  (p6) br.cond.dpnt .map
    205  br.ret.sptk.many rp // boogie on out in new context
    223  br.cond.sptk .done
    [all …]

relocate_kernel.S
    80   (p7) br.cond.dpnt.few 4f
    86   br.cloop.sptk.few 3b
    91   br.sptk.few 2b
    131  br.sptk.few .dest_page
    138  (p6) br.cond.sptk.few .loop;;
    142  (p6) br.cond.sptk.few .loop;;
    145  (p6) br.cond.sptk.few .end_loop;;
    148  (p6) br.cond.sptk.few .loop
    160  br.ctop.sptk.few 1b
    161  br.sptk.few .loop
    [all …]

esi_stub.S
    84   br.call.sptk.many rp=ia64_switch_mode_phys
    87   br.call.sptk.many rp=b6 // call the ESI function
    92   br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
    97   br.ret.sptk.many rp

efi_stub.S
    66   br.call.sptk.many rp=ia64_switch_mode_phys
    76   br.call.sptk.many rp=b6 // call the EFI function
    81   br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
    86   br.ret.sptk.many rp

mca_asm.S
    82   (p7) br.cond.dpnt.few 4f
    88   br.cloop.sptk.few 3b
    93   br.sptk.few 2b
    134  br.sptk.many b1
    145  br.sptk ia64_state_save // save the state that is not in minstate
    156  (p7) br.spnt done_tlb_purge_and_reload
    164  br.sptk.many ia64_do_tlb_purge;;
    237  br.sptk ia64_new_stack
    243  br.sptk ia64_set_kernel_registers
    275  br.call.sptk.many b0=ia64_mca_handler
    [all …]

ivt.S
    84   br.sptk.many dispatch_to_fault_handler
    190  (p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault)
    271  (p6) br.cond.spnt page_fault
    315  (p6) br.cond.spnt page_fault
    357  (p8) br.cond.dptk .itlb_fault
    368  (p8) br.cond.spnt page_fault
    397  (p8) br.cond.dptk dtlb_fault
    418  (p8) br.cond.spnt page_fault
    506  (p6) br.cond.spnt page_fault
    508  br.sptk.many b0 // return to continuation point
    [all …]

fsys.S
    90   (p8) br.spnt.many fsys_fallback_syscall
    123  (p8) br.spnt.many fsys_fallback_syscall
    147  (p6) br.cond.spnt.few .fail_einval
    196  (p6) br.cond.spnt.few .fail_einval
    211  (p6) br.cond.spnt.many fsys_fallback_syscall
    273  (p7) br.cond.dpnt.few .time_redo // sequence number changed, redo
    290  (p6) br.cond.dpnt.few .time_normalize
    322  (p6) br.spnt.few fsys_fallback_syscall
    325  br.many .gettime
    343  (p6) br.cond.spnt.few .fail_einval // B
    [all …]

/arch/nios2/kernel/
entry.S
    238  br translate_rc_and_ret
    288  br ret_from_exception
    299  br ret_from_exception
    317  br local_restart /* restart syscall */
    321  br ret_from_exception
    379  br restore_all
    417  br ret_from_exception
    422  br ret_from_exception
    426  br ret_from_exception
    430  br ret_from_exception
    [all …]

/arch/csky/abiv2/
strcpy.S
    56   br 1b
    61   br 9f
    65   br 9f
    69   br 9f
    73   br 9f
    77   br 9f
    81   br 9f

/arch/arm64/kernel/
module-plts.c
    27   static u32 br;    in get_plt_entry() local
    29   if (!br)    in get_plt_entry()
    30   br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,    in get_plt_entry()
    34   plt.br = cpu_to_le32(br);    in get_plt_entry()
    49   if (a->add != b->add || a->br != b->br)    in plt_entries_equal()
    110  u32 br;    in module_emit_veneer_for_adrp() local
    123  br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,    in module_emit_veneer_for_adrp()
    127  plt[i].br = cpu_to_le32(br);    in module_emit_veneer_for_adrp()
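
In module-plts.c, br holds an encoded AArch64 branch instruction that is written into the br slot of a module PLT/veneer entry; get_plt_entry() generates the `br x16` encoding once and caches it in a static. A small sketch of that caching pattern under assumed definitions (the struct layout and encode_br() below are simplified stand-ins for illustration, not the kernel's struct plt_entry or aarch64_insn_gen_branch_reg()):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Sketch of the caching pattern in get_plt_entry(): the "br x16"
     * encoding never changes, so generate it once and reuse it for every
     * PLT entry.  Struct layout and encoder are illustrative assumptions.
     */
    struct plt_entry {
        uint32_t adrp;  /* adrp x16, <page of target>    */
        uint32_t add;   /* add  x16, x16, :lo12:<target> */
        uint32_t br;    /* br   x16                      */
    };

    /* AArch64 "br Xn": base opcode 0xd61f0000 with Rn in bits [9:5] */
    static uint32_t encode_br(unsigned int reg)
    {
        return 0xd61f0000u | ((reg & 0x1f) << 5);
    }

    static uint32_t get_br_x16(void)
    {
        static uint32_t br;     /* cached across calls, as in the kernel */

        if (!br)
            br = encode_br(16);
        return br;
    }

    int main(void)
    {
        struct plt_entry plt = { 0, 0, get_br_x16() };

        printf("br slot = 0x%08x\n", plt.br);   /* br x16 -> 0xd61f0200 */
        return 0;
    }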

/arch/powerpc/sysdev/
fsl_lbc.c
    74   u32 br = in_be32(&lbc->bank[i].br);    in fsl_lbc_find() local
    77   if (br & BR_V && (br & or & BR_BA) == fsl_lbc_addr(addr_base))    in fsl_lbc_find()
    97   u32 br;    in fsl_upm_find() local
    108  br = in_be32(&lbc->bank[bank].br);    in fsl_upm_find()
    110  switch (br & BR_MSEL) {    in fsl_upm_find()
    124  switch (br & BR_PS) {    in fsl_upm_find()
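
In fsl_lbc.c, br is the Base Register of a local-bus chip-select bank: fsl_lbc_find() accepts a bank when its valid bit is set and the base-address bits selected by the Options Register match the requested address, and fsl_upm_find() then decodes the machine-select and port-size fields. A slightly simplified sketch of that matching test (the real code compares against fsl_lbc_addr(addr_base), which also handles extended addressing; the mask values here are illustrative assumptions, not copied from the kernel headers):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Sketch of the bank-matching test in fsl_lbc_find(): a chip-select
     * bank matches when its Base Register is valid and the base-address
     * bits selected by the Options Register agree with the target
     * address.  Mask values below are illustrative assumptions.
     */
    #define BR_BA   0xffff8000u     /* base address field */
    #define BR_V    0x00000001u     /* valid bit          */

    static bool bank_matches(uint32_t br, uint32_t or, uint32_t addr_base)
    {
        return (br & BR_V) && (br & or & BR_BA) == (addr_base & or & BR_BA);
    }

    int main(void)
    {
        uint32_t br = 0xe0000000u | BR_V;   /* bank based at 0xe0000000, valid */
        uint32_t or = 0xffff8000u;          /* OR address mask: 32 KiB window  */

        printf("0xe0001000 -> %d\n", bank_matches(br, or, 0xe0001000u));  /* 1 */
        printf("0xf0000000 -> %d\n", bank_matches(br, or, 0xf0000000u));  /* 0 */
        return 0;
    }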

/arch/ia64/include/asm/
asmmacro.h
    96   br.call.sptk.many b7=2f;; \
    101  br.ret.sptk.many b6;; \
    104  # define FSYS_RETURN br.ret.sptk.many b6