/arch/xtensa/include/asm/ |
D | cacheasm.h |
      35  .macro __loop_cache_unroll ar at insn size line_width max_immed
      45  __loopi \ar, \at, \size, (_reps << (\line_width))
      48  \insn \ar, _index << (\line_width)
      51  __endla \ar, \at, _reps << (\line_width)
      56  .macro __loop_cache_all ar at insn size line_width max_immed
      58  movi \ar, 0
      59  __loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
      64  .macro __loop_cache_range ar as at insn line_width
      66  extui \at, \ar, 0, \line_width
      69  __loops \ar, \as, \at, \line_width
      [all …]
|
D | asmmacro.h |
      49  .macro __loopi ar, at, size, incr
      55  addi \at, \ar, \size
      65  .macro __loops ar, as, at, incr_log2, mask_log2, cond, ncond
      91  add \at, \ar, \at
      93  add \at, \ar, \as
     104  .macro __loopt ar, as, at, incr_log2
     107  sub \at, \as, \ar
     137  .macro __endl ar, as
     139  bltu \ar, \as, 98b
     148  .macro __endla ar, as, incr
      [all …]
|
/arch/ia64/lib/ |
D | xor.S |
      14  .save ar.pfs, r31
      15  alloc r31 = ar.pfs, 3, 0, 13, 16
      16  .save ar.lc, r30
      17  mov r30 = ar.lc
      23  mov ar.ec = 6 + 2
      30  mov ar.lc = in0
      43  mov ar.lc = r30
      52  .save ar.pfs, r31
      53  alloc r31 = ar.pfs, 4, 0, 20, 24
      54  .save ar.lc, r30
      [all …]
|
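All of the ia64 library routines in this directory follow the same convention around the two application registers they clobber: ar.pfs (captured by alloc) and ar.lc (callee-preserved), each stashed in a scratch register and annotated for the unwinder. A minimal prologue/epilogue sketch of that shape; the frame layout and the r30/r31 choices are copied from the xor.S hits above, and the loop body is elided:

        .prologue
        .save   ar.pfs, r31
        alloc   r31 = ar.pfs, 3, 0, 13, 16      // inputs, locals, outputs, rotating
        .save   ar.lc, r30
        mov     r30 = ar.lc                     // ar.lc is callee-preserved
        .body
        // ... counted or pipelined loop that clobbers ar.lc / ar.ec ...
        mov     ar.lc = r30                     // put the caller's loop count back
        mov     ar.pfs = r31                    // restore ar.pfs (br.ret restores ar.ec from it)
        ;;
        br.ret.sptk.many rp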
D | flush.S |
      29  alloc r2=ar.pfs,2,0,0,0
      43  .save ar.lc,r3
      44  mov r3=ar.lc // save ar.lc
      48  mov ar.lc=r8
      62  mov ar.lc=r3 // restore ar.lc
      82  alloc r2=ar.pfs,2,0,0,0
      98  .save ar.lc,r3
      99  mov r3=ar.lc // save ar.lc
     103  mov ar.lc=r8
     118  mov ar.lc=r3 // restore ar.lc
|
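flush.S is the simplest ar.lc user: a counted loop stepped by br.cloop. A sketch of the idiom, assuming r8 already holds the iteration count minus one, r16 the starting address, and a 32-byte line stride picked purely for illustration:

        .save   ar.lc, r3
        mov     r3 = ar.lc              // preserve the caller's ar.lc
        ;;
        mov     ar.lc = r8              // loop runs r8 + 1 times
        ;;
1:
        fc      r16                     // flush the cache line at r16
        add     r16 = 32, r16           // advance to the next line
        br.cloop.sptk.few 1b            // decrement ar.lc, branch while non-zero
        ;;
        mov     ar.lc = r3              // restore ar.lc before returning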
D | copy_user.S |
      77  .save ar.pfs, saved_pfs
      78  alloc saved_pfs=ar.pfs,3,((2*PIPE_DEPTH+7)&~7),0,((2*PIPE_DEPTH+7)&~7)
      88  .save ar.lc, saved_lc
      89  mov saved_lc=ar.lc // preserve ar.lc (slow)
     100  mov ar.ec=PIPE_DEPTH
     104  mov ar.lc=len2 // initialize lc for small count
     119  mov ar.lc=saved_lc
     121  mov ar.pfs=saved_pfs // restore ar.ec
     192  mov ar.ec=PIPE_DEPTH
     194  mov ar.lc=cnt
      [all …]
|
D | strnlen_user.S |
      21  alloc r2=ar.pfs,2,0,0,0
      22  .save ar.lc, r16
      23  mov r16=ar.lc // preserve ar.lc
      29  mov ar.lc=r3
      45  mov ar.lc=r16 // restore ar.lc
|
D | memcpy.S |
      50  .save ar.pfs, saved_pfs
      51  alloc saved_pfs=ar.pfs,3,Nrot,0,Nrot
      52  .save ar.lc, saved_lc
      53  mov saved_lc=ar.lc
      75  mov ar.ec=N
      79  mov ar.lc=cnt
     108  mov ar.lc=saved_lc
     110  mov ar.pfs=saved_pfs
     122  mov ar.ec=MEM_LAT
     125  mov ar.lc=cnt
      [all …]
|
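memcpy.S and copy_user.S layer ar.ec and the rotating register file on top of ar.lc to software-pipeline the copy. A stripped-down sketch of that modulo-scheduled loop; N (the pipeline depth), src, dst and cnt are placeholders here, and the rotating region is assumed to have been reserved by the earlier alloc:

        .rotr   val[N]                  // rotating GRs carry the in-flight data
        .rotp   p[N]                    // rotating predicates gate each stage
        mov     ar.ec = N               // epilogue count drains the pipeline
        mov     ar.lc = cnt             // cnt = number of 8-byte words - 1
        mov     pr.rot = 1 << 16        // p16 = 1, remaining rotating predicates clear
        ;;
1:
(p[0])  ld8     val[0] = [src], 8       // first stage: load and post-increment
(p[N-1]) st8    [dst] = val[N-1], 8     // last stage: store the oldest element
        br.ctop.sptk.few 1b             // rotate regs/predicates, count ar.lc down
        ;;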
D | copy_page.S |
      41  .save ar.pfs, saved_pfs
      42  alloc saved_pfs=ar.pfs,3,Nrot-3,0,Nrot
      48  .save ar.lc, saved_lc
      49  mov saved_lc=ar.lc
      50  mov ar.ec=PIPE_DEPTH
      65  mov ar.lc=lcount
      97  mov ar.pfs=saved_pfs
      98  mov ar.lc=saved_lc
|
D | clear_user.S |
      58  .save ar.pfs, saved_pfs
      59  alloc saved_pfs=ar.pfs,2,0,0,0
      61  .save ar.lc, saved_lc
      62  mov saved_lc=ar.lc // preserve ar.lc (slow)
      70  mov ar.lc=tmp // initialize lc for small count
      91  mov ar.lc=saved_lc
     127  mov ar.lc=tmp
     155  mov ar.lc=saved_lc
     209  mov ar.lc=saved_lc
|
D | clear_page.S |
      38  .save ar.lc, saved_lc
      39  mov saved_lc = ar.lc
      42  mov ar.lc = (PREFETCH_LINES - 1)
      52  mov ar.lc = r16 // one L3 line per iteration
      76  mov ar.lc = saved_lc // restore lc
|
D | copy_page_mck.S |
     104  alloc r8 = ar.pfs, 2, Nrot-2, 0, Nrot
     109  .save ar.lc, saved_lc
     110  mov saved_lc = ar.lc
     117  mov ar.ec = 1 // special unrolled loop
     120  mov ar.lc = 2*PREFETCH_DIST - 1
     139  mov ar.lc = t1 // with 64KB pages, t1 is too big to fit in 8 bits!
     140  mov ar.ec = N // # of stages in pipeline
     184  mov ar.lc = saved_lc
|
/arch/ia64/kernel/ |
D | relocate_kernel.S |
      21  alloc r31=ar.pfs,4,0,0,0
      39  mov ar.rsc=0 // put RSE in enforced lazy mode
      44  mov r18=ar.rnat
      45  mov ar.bspstore=r8
      52  mov ar.rnat=r18
      81  mov ar.lc=r20
     154  mov ar.lc=r14;;
     189  alloc loc0=ar.pfs,1,2,0,0
     191  mov ar.rsc=0 // put RSE in enforced lazy mode
     203  mov r4=ar.rnat
      [all …]
|
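The kernel-side users (relocate_kernel.S here, minstate.h and mca_asm.h below) switch the register backing store with the same fixed sequence: stop the RSE, capture ar.rnat, move ar.bspstore, then restore the NaT collection bits. A sketch of that sequence, with r8 assumed to hold the new backing-store address and the register numbers borrowed from the relocate_kernel.S hits above:

        mov     ar.rsc = 0              // enforced lazy mode: RSE stops spilling/filling
        ;;
        mov     r18 = ar.rnat           // ar.rnat is only stable while the RSE is idle
        ;;
        mov     ar.bspstore = r8        // switch to the new backing store
        ;;
        mov     ar.rnat = r18           // the bspstore write clobbered ar.rnat; restore it
        ;;
        mov     ar.rsc = 3              // back to eager mode for normal operation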
D | pal.S |
      34  alloc r3=ar.pfs,1,0,0,0
      59  alloc loc1 = ar.pfs,4,5,0,0
      69  mov loc4=ar.rsc // save RSE configuration
      71  mov ar.rsc=0 // put RSE in enforced lazy, LE mode
      85  mov ar.rsc = loc4 // restore RSE configuration
      86  mov ar.pfs = loc1
     103  alloc loc1 = ar.pfs,4,4,4,0
     122  mov ar.pfs = loc1
     151  alloc loc1 = ar.pfs,4,7,0,0
     168  mov loc4=ar.rsc // save RSE configuration
      [all …]
|
D | gate.S |
     105  .savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF; \
     106  .savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF; \
     109  .savesp ar.pfs, CFM_OFF+SIGCONTEXT_OFF; \
     129  mov.m r9=ar.bsp // fetch ar.bsp
     130  .spillsp.p p1, ar.rnat, RNAT_OFF+SIGCONTEXT_OFF
     133  alloc r8=ar.pfs,0,0,3,0
     144  .spillsp ar.bsp, BSP_OFF+SIGCONTEXT_OFF
     168  mov r14=ar.bsp
     198  mov ar.rsc=0 // put RSE into enforced lazy mode
     200  .save ar.rnat, r19
      [all …]
|
D | minstate.h |
      11  (pUStk) mov.m r20=ar.itc;
      50  mov r27=ar.rsc; /* M */ \
      52  mov r25=ar.unat; /* M */ \
      54  mov r26=ar.pfs; /* I */ \
      56  mov r21=ar.fpsr; /* M */ \
      70  (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
      72  (pUStk) mov.m r24=ar.rnat; \
      78  (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
      80  (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
      83  (pUStk) mov r18=ar.bsp; \
      [all …]
|
D | efi_stub.S |
      48  alloc loc1=ar.pfs,8,7,7,0
      54  mov loc4=ar.rsc // save RSE configuration
      55  mov ar.rsc=0 // put RSE in enforced lazy, LE mode
      77  .ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode
      82  .ret2: mov ar.rsc=loc4 // restore RSE configuration
      83  mov ar.pfs=loc1
|
D | esi_stub.S |
      51  alloc loc1=ar.pfs,2,7,8,0
      72  mov loc4=ar.rsc // save RSE configuration
      73  mov ar.rsc=0 // put RSE in enforced lazy, LE mode
      88  .ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode
      93  .ret2: mov ar.rsc=loc4 // restore RSE configuration
      94  mov ar.pfs=loc1
|
D | entry.S |
      65  alloc loc1=ar.pfs,8,2,3,0
      75  mov ar.pfs=loc1 // restore ar.pfs
      80  (p6) mov ar.pfs=r0 // clear ar.pfs on success
      89  mov ar.unat=0; mov ar.lc=0
     113  alloc r16=ar.pfs,8,2,6,0
     127  mov ar.pfs=loc1
     141  alloc r16=ar.pfs,8,2,6,0
     155  mov ar.pfs=loc1
     168  alloc r16=ar.pfs,1,0,0,0
     241  mov r17=ar.unat // preserve caller's
      [all …]
|
D | entry.h |
      32  .spillsp ar.pfs, PT(CR_IFS)+16+(off); \
      33  .spillsp ar.unat, PT(AR_UNAT)+16+(off); \
      34  .spillsp ar.fpsr, PT(AR_FPSR)+16+(off); \
      43  .savesp ar.unat,SW(CALLER_UNAT)+16+(off); \
      44  .savesp ar.fpsr,SW(AR_FPSR)+16+(off); \
      60  .spillsp ar.pfs,SW(AR_PFS)+16+(off); .spillsp ar.lc,SW(AR_LC)+16+(off); \
      62  .spillsp ar.rnat,SW(AR_RNAT)+16+(off); \
      63  .spillsp ar.bspstore,SW(AR_BSPSTORE)+16+(off); \
|
D | mca_drv_asm.S |
      21  alloc r16=ar.pfs,0,2,3,0 // make a new frame
      22  mov ar.rsc=0
      29  mov ar.bspstore=r22
      51  mov ar.pfs=loc0
|
D | mca_asm.S |
      83  mov ar.lc=r20
     263  mov ar.rsc=3 // set eager mode for C handler
     268  alloc r14=ar.pfs,0,0,3,0
     284  alloc r14=ar.pfs,0,0,0,0 // remove the MCA handler frame
     372  mov ar.rsc=3 // set eager mode for C handler
     377  alloc r14=ar.pfs,0,0,3,0
     398  alloc r14=ar.pfs,0,0,0,0 // remove the INIT handler frame
     525  mov temp3=ar.csd
     526  mov temp4=ar.ssd
     531  mov temp3=ar.unat
      [all …]
|
D | head.S |
      52  mov ar.lc=IA64_NUM_DBG_REGS-1;; \
      60  mov ar.lc=IA64_NUM_DBG_REGS-1;; \
      91  mov ar.lc=0x08-1;; \
     119  SAVE_FROM_REG(ar.fpsr,_reg1,_reg2);; \
     120  SAVE_FROM_REG(ar.pfs,_reg1,_reg2);; \
     121  SAVE_FROM_REG(ar.rnat,_reg1,_reg2);; \
     122  SAVE_FROM_REG(ar.unat,_reg1,_reg2);; \
     123  SAVE_FROM_REG(ar.bspstore,_reg1,_reg2);; \
     137  SAVE_FROM_REG(ar.lc, _reg1, _reg2);; \
     292  mov ar.fpsr=r2
      [all …]
|
/arch/ia64/include/asm/ |
D | mca_asm.h |
      88  mov ar.rsc = 0 ; \
      91  mov temp2 = ar.bspstore; \
      95  mov temp1 = ar.rnat; \
      97  mov ar.bspstore = temp2; \
      99  mov ar.rnat = temp1; \
     171  mov ar.rsc = 0; \
     174  mov r13 = ar.k6; \
     175  mov temp2 = ar.bspstore; \
     179  mov temp1 = ar.rnat; \
     181  mov ar.bspstore = temp2; \
      [all …]
|
/arch/s390/kvm/ |
D | gaccess.h |
     190  u8 ar, unsigned long *gpa, enum gacc_mode mode);
     191  int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
     194  int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
     246  int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,  in write_guest() argument
     249  return access_guest(vcpu, ga, ar, data, len, GACC_STORE);  in write_guest()
     266  int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,  in read_guest() argument
     269  return access_guest(vcpu, ga, ar, data, len, GACC_FETCH);  in read_guest()
|
D | priv.c |
      89  u8 ar;  in handle_set_clock() local
      97  op2 = kvm_s390_get_base_disp_s(vcpu, &ar);  in handle_set_clock()
     100  rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));  in handle_set_clock()
     129  u8 ar;  in handle_set_prefix() local
     136  operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);  in handle_set_prefix()
     143  rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));  in handle_set_prefix()
     167  u8 ar;  in handle_store_prefix() local
     174  operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);  in handle_store_prefix()
     183  rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));  in handle_store_prefix()
     197  u8 ar;  in handle_store_cpu_address() local
      [all …]
|