/arch/ia64/kernel/ |
D | brl_emu.c | 76 if (ia64_psr(regs)->ri != 1) return rv; in ia64_emulate_brl() 105 ia64_psr(regs)->ri = 0; in ia64_emulate_brl() 121 ia64_psr(regs)->ri = 0; in ia64_emulate_brl() 161 cpl = ia64_psr(regs)->cpl; in ia64_emulate_brl() 187 ia64_psr(regs)->ri = 0; in ia64_emulate_brl() 189 if (ia64_psr(regs)->it == 0) in ia64_emulate_brl() 206 } else if (ia64_psr(regs)->tb) { in ia64_emulate_brl() 219 } else if (ia64_psr(regs)->ss) { in ia64_emulate_brl()
|
D | traps.c | 103 siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); in ia64_bad_break() 197 struct ia64_psr *psr = ia64_psr(regs); in disabled_fph_fault() 295 if (!fp_fault && (ia64_psr(regs)->ri == 0)) in handle_fpu_swa() 328 current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr); in handle_fpu_swa() 350 siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); in handle_fpu_swa() 374 siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri); in handle_fpu_swa() 422 si.si_addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri); in ia64_illegal_op_fault() 452 ia64_psr(&regs)->ed = 1; in ia64_fault() 456 iip = regs.cr_iip + ia64_psr(&regs)->ri; in ia64_fault() 468 regs.cr_iip + ia64_psr(&regs)->ri, regs.pr); in ia64_fault() [all …]
|
D | kprobes.c | 752 ia64_psr(regs)->ss = 0; in resume_execution() 769 ia64_psr(regs)->ri = slot; in prepare_ss() 772 ia64_psr(regs)->ss = 1; in prepare_ss() 777 unsigned int slot = ia64_psr(regs)->ri; in is_ia64_break_inst() 807 ia64_psr(regs)->ss = 0; in pre_kprobes_handler() 876 ia64_psr(regs)->ri = p->ainsn.slot; in pre_kprobes_handler() 879 ia64_psr(regs)->ss = 0; in pre_kprobes_handler() 939 ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf; in kprobe_fault_handler() 1073 ia64_psr(regs)->ri = addr & 0xf; in setjmp_pre_handler()
|
D | signal.c | 74 ia64_psr(&scr->pt)->ri = ip & 0x3; in restore_sigcontext() 90 struct ia64_psr *psr = ia64_psr(&scr->pt); in restore_sigcontext() 262 err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip); in setup_sigcontext() 376 ia64_psr(&scr->pt)->ri = 0; /* start executing in first slot */ in setup_frame() 377 ia64_psr(&scr->pt)->be = 0; /* force little-endian byte-order */ in setup_frame()
|
D | mca_drv.c | 499 struct ia64_psr *psr1, *psr2; in recover_from_read_error() 524 psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr); in recover_from_read_error() 525 psr2 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_xpsr); in recover_from_read_error() 549 psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr; in recover_from_read_error()
|
D | ptrace.c | 153 unsigned long w0, ri = ia64_psr(regs)->ri + 1; in ia64_increment_ip() 170 ia64_psr(regs)->ri = ri; in ia64_increment_ip() 176 unsigned long w0, ri = ia64_psr(regs)->ri - 1; in ia64_decrement_ip() 178 if (ia64_psr(regs)->ri == 0) { in ia64_decrement_ip() 191 ia64_psr(regs)->ri = ri; in ia64_decrement_ip() 681 struct ia64_psr *psr = ia64_psr(task_pt_regs(task)); in ia64_flush_fph() 707 struct ia64_psr *psr = ia64_psr(task_pt_regs(task)); in ia64_sync_fph() 1114 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child)); in user_enable_single_step() 1123 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child)); in user_enable_block_step() 1132 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child)); in user_disable_single_step()
|
D | process.c | 101 unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; in show_regs() 170 if (!ia64_psr(&scr->pt)->lp) in do_notify_resume_user() 171 ia64_psr(&scr->pt)->lp = 1; in do_notify_resume_user() 493 dst[42] = ip + ia64_psr(pt)->ri; in do_copy_task_regs()
|
D | perfmon.c | 4005 ia64_psr(regs)->pp = 0; in pfm_stop() 4020 ia64_psr(regs)->up = 0; in pfm_stop() 4027 ia64_psr(tregs)->up = 0; in pfm_stop() 4070 ia64_psr(regs)->pp = 1; in pfm_start() 4101 ia64_psr(regs)->up = 1; in pfm_start() 4115 ia64_psr(tregs)->up = 1; in pfm_start() 4334 ia64_psr(regs)->sp = 0; in pfm_context_load() 4383 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0; in pfm_context_load() 4507 ia64_psr(regs)->sp = 1; in pfm_context_unload() 4609 BUG_ON(ia64_psr(regs)->up); in pfm_exit_thread() [all …]
|
D | unaligned.c | 1294 struct ia64_psr *ipsr = ia64_psr(regs); in ia64_handle_unaligned() 1306 if (ia64_psr(regs)->be) { in ia64_handle_unaligned() 1319 eh = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri); in ia64_handle_unaligned()
|
D | mca.c | 902 if (ia64_psr(regs)->ic) { in finish_pt_regs() 929 if (ia64_psr(regs)->bn) in finish_pt_regs() 997 if (ia64_psr(regs)->dt == 0) { in ia64_mca_modify_original_stack() 1009 if (ia64_psr(regs)->rt == 0) { in ia64_mca_modify_original_stack()
|
/arch/ia64/include/asm/ |
D | ptrace.h | 52 # define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri) 89 # define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr) macro 90 # define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
|
D | switch_to.h | 49 ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \ 61 if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \ 62 ia64_psr(task_pt_regs(prev))->mfh = 0; \
|
D | processor.h | 86 struct ia64_psr { struct 350 _regs->cr_iip + ia64_psr(_regs)->ri; \
|
D | uaccess.h | 357 e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri); in ia64_done_with_exception()
|
/arch/ia64/kvm/ |
D | vcpu.c | 128 static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr) in mm_switch_action() 133 void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, in switch_mm_mode() 134 struct ia64_psr new_psr) in switch_mm_mode() 184 void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr, in check_mm_mode_switch() 185 struct ia64_psr new_psr) in check_mm_mode_switch() 1080 struct ia64_psr vpsr; in vcpu_tpa() 1087 vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); in vcpu_tpa() 1631 struct ia64_psr old_psr, new_psr; in vcpu_set_psr() 1633 old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); in vcpu_set_psr() 1658 new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); in vcpu_set_psr() [all …]
|
D | process.c | 497 if (!fp_fault && (ia64_psr(regs)->ri == 0)) in vmm_handle_fpu_swa() 657 if (ia64_psr(regs)->cpl == 0) { in kvm_ia64_handle_break() 727 struct ia64_psr vpsr; in vhpi_detection() 729 vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); in vhpi_detection()
|
D | vtlb.c | 95 struct ia64_psr vpsr; in vhpt_enabled() 97 vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr); in vhpt_enabled()
|
D | mmio.c | 185 slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri; in emulate_io_inst()
|
/arch/ia64/mm/ |
D | fault.c | 245 ia64_psr(regs)->ed = 1; in ia64_do_page_fault() 268 ia64_psr(regs)->ed = 1; in ia64_do_page_fault()
|
D | extable.c | 114 ia64_psr(regs)->ri = fix & 0x3; /* set continuation slot number */ in ia64_handle_exception()
|