
Searched refs:SHR (Results 1 – 25 of 146) sorted by relevance


/external/speex/libspeexdsp/
fixed_generic.h
59 #define SHR(a,shift) ((a) >> (shift)) macro
61 #define PSHR(a,shift) (SHR((a)+((EXTEND32(1)<<((shift))>>1)),shift))
78 #define MULT16_32_Q12(a,b) ADD32(MULT16_16((a),SHR((b),12)), SHR(MULT16_16((a),((b)&0x00000fff)),12…
79 #define MULT16_32_Q13(a,b) ADD32(MULT16_16((a),SHR((b),13)), SHR(MULT16_16((a),((b)&0x00001fff)),13…
80 #define MULT16_32_Q14(a,b) ADD32(MULT16_16((a),SHR((b),14)), SHR(MULT16_16((a),((b)&0x00003fff)),14…
82 #define MULT16_32_Q11(a,b) ADD32(MULT16_16((a),SHR((b),11)), SHR(MULT16_16((a),((b)&0x000007ff)),11…
83 #define MAC16_32_Q11(c,a,b) ADD32(c,ADD32(MULT16_16((a),SHR((b),11)), SHR(MULT16_16((a),((b)&0x0000…
85 #define MULT16_32_P15(a,b) ADD32(MULT16_16((a),SHR((b),15)), PSHR(MULT16_16((a),((b)&0x00007fff)),1…
86 #define MULT16_32_Q15(a,b) ADD32(MULT16_16((a),SHR((b),15)), SHR(MULT16_16((a),((b)&0x00007fff)),15…
87 #define MAC16_32_Q15(c,a,b) ADD32(c,ADD32(MULT16_16((a),SHR((b),15)), SHR(MULT16_16((a),((b)&0x0000…
[all …]
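
Note: in the libspeexdsp macros above, SHR is a plain arithmetic right shift, while PSHR adds half of the divisor first so the shift rounds to nearest instead of truncating. A minimal standalone sketch of that difference, with EXTEND32 reduced to a cast and the names reused only for illustration:

#include <stdio.h>
#include <stdint.h>

/* Simplified copies of the macros shown above; EXTEND32 is just a
 * widening cast in the generic fixed-point build. */
#define EXTEND32(x)    ((int32_t)(x))
#define SHR(a, shift)  ((a) >> (shift))
#define PSHR(a, shift) (SHR((a) + ((EXTEND32(1) << (shift)) >> 1), shift))

int main(void) {
    printf("%d %d\n", SHR(700, 9), PSHR(700, 9));   /* 1 1  (700/512 = 1.37) */
    printf("%d %d\n", SHR(800, 9), PSHR(800, 9));   /* 1 2  (800/512 = 1.56) */
    return 0;
}
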
/external/libopus/celt/
fixed_generic.h
41 #define MULT16_32_Q16(a,b) ((opus_val32)SHR((opus_int64)((opus_val16)(a))*(b),16))
43 #define MULT16_32_Q16(a,b) ADD32(MULT16_16((a),SHR((b),16)), SHR(MULT16_16SU((a),((b)&0x0000ffff)),…
50 #define MULT16_32_P16(a,b) ADD32(MULT16_16((a),SHR((b),16)), PSHR(MULT16_16SU((a),((b)&0x0000ffff))…
55 #define MULT16_32_Q15(a,b) ((opus_val32)SHR((opus_int64)((opus_val16)(a))*(b),15))
57 #define MULT16_32_Q15(a,b) ADD32(SHL(MULT16_16((a),SHR((b),16)),1), SHR(MULT16_16SU((a),((b)&0x0000…
62 #define MULT32_32_Q31(a,b) ((opus_val32)SHR((opus_int64)(a)*(opus_int64)(b),31))
64 …ADD32(ADD32(SHL(MULT16_16(SHR((a),16),SHR((b),16)),1), SHR(MULT16_16SU(SHR((a),16),((b)&0x0000ffff…
98 #define SHR(a,shift) ((a) >> (shift)) macro
100 #define PSHR(a,shift) (SHR((a)+((EXTEND32(1)<<((shift))>>1)),shift))
143 #define MAC16_32_Q15(c,a,b) ADD32((c),ADD32(MULT16_16((a),SHR((b),15)), SHR(MULT16_16((a),((b)&0x00…
[all …]
fixed_c5x.h
71 #define MULT16_32_Q15(a,b) ADD32(SHL(MULT16_16((a),SHR((b),16)),1), SHR(MULT16_16SU((a),(b)),15))
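
Note: MULT16_32_Q15 above is the Q15 product of a 16-bit and a 32-bit value. Line 55 of fixed_generic.h computes it with one 64-bit multiply; line 57 instead splits b into a signed high half and an unsigned low half so that only 16x16 multiplies are needed. A sketch in plain C, assuming arithmetic right shift for signed values, showing the two forms agree:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* Reference form (line 55): one widening multiply, then >> 15. */
static int32_t mult16_32_q15_ref(int16_t a, int32_t b) {
    return (int32_t)(((int64_t)a * b) >> 15);
}

/* Split form (line 57): b = hi*2^16 + lo, so
 * (a*b) >> 15 == a*hi*2 + ((a*lo) >> 15).
 * 64-bit intermediates are used here for clarity; the real macro keeps
 * everything in 32 bits. */
static int32_t mult16_32_q15_split(int16_t a, int32_t b) {
    int32_t  hi = b >> 16;                /* signed high half   */
    uint32_t lo = (uint32_t)b & 0xffffu;  /* unsigned low half  */
    return (int32_t)((int64_t)a * hi * 2 + (((int64_t)a * lo) >> 15));
}

int main(void) {
    int16_t a = 12345;
    int32_t b = -987654321;
    assert(mult16_32_q15_ref(a, b) == mult16_32_q15_split(a, b));
    printf("%d\n", mult16_32_q15_ref(a, b));
    return 0;
}
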
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
immv216.ll
126 ; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
127 ; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
147 ; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
148 ; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
168 ; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
169 ; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
189 ; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
190 ; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
211 ; VI-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[VAL]], 16
212 ; VI-DAG: v_mov_b32_e32 [[V_SHR:v[0-9]+]], [[SHR]]
[all …]
bfe-combine.ll
10 ; CI: v_lshrrev_b32_e32 v[[SHR:[0-9]+]], 6, v{{[0-9]+}}
11 ; CI: v_and_b32_e32 v[[ADDRLO:[0-9]+]], 0x3fc, v[[SHR]]
33 ; CI: v_lshrrev_b32_e32 v[[SHR:[0-9]+]], 1, v{{[0-9]+}}
34 ; CI: v_and_b32_e32 v[[AND:[0-9]+]], 0x7fff8000, v[[SHR]]
alignbit-pat.ll
4 ; GCN-DAG: s_load_dword s[[SHR:[0-9]+]]
6 ; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], s[[SHR]]
20 ; GCN-DAG: load_dword v[[SHR:[0-9]+]],
22 ; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], v[[SHR]]
scalar_to_vector.ll
7 ; GCN: v_lshrrev_b32_e32 [[SHR:v[0-9]+]], 16, [[VAL]]
8 ; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 16, [[SHR]]
9 ; GCN: v_or_b32_e32 v[[OR:[0-9]+]], [[SHR]], [[SHL]]
llvm.amdgcn.buffer.store.format.d16.ll
18 ; UNPACKED-DAG: s_lshr_b32 [[SHR:s[0-9]+]], [[S_DATA]], 16
21 ; UNPACKED-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], [[SHR]]
/external/autotest/server/cros/res_resource_monitor/
top_whitespace_ridden.txt
7 PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
24 PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
70 PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
top_field_order_changed.txt
7 PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
17 PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
top_test_data.txt
7 PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
24 PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
/external/libhevc/decoder/
ihevcd_bitstream.h
98 m_u4_bits |= SHR(m_u4_nxt_word, \
116 m_u4_bits |= SHR(m_u4_nxt_word, (WORD_SIZE - m_u4_bit_ofst)); \
157 m_u4_bits |= SHR(m_u4_nxt_word, \
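
Note: in ihevcd_bitstream.h the SHR of m_u4_nxt_word by (WORD_SIZE - bit offset) pulls the top bits of the freshly loaded word into the low bits of the bit cache during a refill. A simplified, hypothetical sketch of that merge step (names and layout invented for illustration, not the decoder's actual state):

#include <stdint.h>
#include <stdio.h>

#define WORD_SIZE 32u

/* Merge the current word and the next word into one 32-bit window that
 * starts 'bit_ofst' bits into the current word (MSB-first bitstream). */
static uint32_t bits_window(uint32_t cur_word, uint32_t nxt_word,
                            uint32_t bit_ofst)     /* 1..31 */
{
    uint32_t bits = cur_word << bit_ofst;          /* drop consumed bits    */
    bits |= nxt_word >> (WORD_SIZE - bit_ofst);    /* SHR fills the bottom  */
    return bits;
}

int main(void) {
    /* With an offset of 8, the window is the low 24 bits of word 0
     * followed by the high 8 bits of word 1. */
    printf("0x%08X\n", bits_window(0x11223344u, 0xAABBCCDDu, 8));  /* 0x223344AA */
    return 0;
}
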
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/
rotate.ll
133 ; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMTCONV]]
138 ; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
160 ; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMTCONV]]
165 ; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
190 ; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMT]]
194 ; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
213 ; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMT]]
217 ; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
bswap.ll
126 ; CHECK-NEXT: [[SHR:%.*]] = lshr i32 %x, 16
127 ; CHECK-NEXT: [[SWAPHALF:%.*]] = or i32 [[SHL]], [[SHR]]
172 ; CHECK-NEXT: [[SHR:%.*]] = lshr i32 %x, 16
173 ; CHECK-NEXT: [[SWAPHALF:%.*]] = or i32 [[SHL]], [[SHR]]
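
Note: the rotate.ll and bswap.ll checks above match the IR produced by the usual shift-and-or source idioms, a rotate written as two shifts plus an or, and a 32-bit halfword swap built the same way. Roughly the C that lowers to that IR (a sketch, not the tests' exact sources):

#include <stdint.h>

/* Rotate right by a variable amount; the lshr/shl/or triple is what
 * the [[SHR]]/[[SHL]]/[[OR]] checks above are matching. */
uint32_t rotr32(uint32_t x, uint32_t r) {
    r &= 31u;                                  /* keep the shift amount in range */
    return (x >> r) | (x << ((32u - r) & 31u));
}

/* Swap the two 16-bit halves of a word: lshr by 16, shl by 16, or. */
uint32_t swap_halves(uint32_t x) {
    return (x >> 16) | (x << 16);
}
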
/external/swiftshader/third_party/llvm-7.0/llvm/test/MC/X86/
intel-syntax-bitwise-ops.s
44 add eax, 9876 SHR 1
58 add eax, 6 XOR 3 shl 1 SHR 1
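
Note: in intel-syntax-bitwise-ops.s, SHR is an assemble-time expression operator, so 9876 SHR 1 should be folded by the assembler into the immediate 9876 >> 1 = 4938 before encoding. The same arithmetic in C, just to make the folding explicit:

#include <stdio.h>

int main(void) {
    /* The immediate that "add eax, 9876 SHR 1" is expected to encode. */
    printf("%d\n", 9876 >> 1);   /* 4938 */
    return 0;
}
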
/external/v8/src/parsing/
token.h
84 T(SHR, ">>>", 11) \
277 return Token::SHR; in BinaryOpForAssignment()
296 return (BIT_OR <= op && op <= SHR) || op == BIT_NOT; in IsBitOp()
308 return (SHL <= op) && (op <= SHR); in IsShiftOp()
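
Note: in V8's token table SHR is the ">>>" operator, JavaScript's zero-filling right shift, as opposed to the sign-propagating ">>". The same distinction in C is shifting an unsigned versus a signed value (arithmetic shift of a negative signed value is implementation-defined but is the common behavior):

#include <stdio.h>
#include <stdint.h>

int main(void) {
    int32_t s = -8;
    /* Sign-propagating shift (JS >>): high bits filled with the sign bit. */
    printf("%d\n", s >> 1);             /* -4 */
    /* Zero-filling shift (JS >>>): reinterpret as unsigned, fill with zeros. */
    printf("%u\n", (uint32_t)s >> 1);   /* 2147483644 */
    return 0;
}
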
/external/llvm/test/Transforms/InstSimplify/
shift-128-kb.ll
15 ; CHECK-NEXT: [[SHR:%.*]] = ashr i128 [[SHL]], [[SH_PROM]]
16 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i128 [[SHR]], 0
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstSimplify/
shift-128-kb.ll
15 ; CHECK-NEXT: [[SHR:%.*]] = ashr i128 [[SHL]], [[SH_PROM]]
16 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i128 [[SHR]], 0
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/IndVarSimplify/
pr32045.ll
9 ; CHECK-NEXT: [[SHR:%.*]] = ashr i32 %neg3, -1
10 ; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 0, [[SHR]]
/external/llvm/test/CodeGen/AMDGPU/
scalar_to_vector.ll
7 ; SI: v_lshrrev_b32_e32 [[SHR:v[0-9]+]], 16, [[VAL]]
8 ; SI: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 16, [[SHR]]
9 ; SI: v_or_b32_e32 v[[OR:[0-9]+]], [[SHL]], [[SHR]]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoopIdiom/X86/
ctlz.ll
425 ; LZCNT-NEXT: [[X_ADDR_05:%.*]] = phi i32 [ [[X]], [[WHILE_BODY_LR_PH]] ], [ [[SHR:%.*]], [[WHIL…
426 ; LZCNT-NEXT: [[SHR]] = ashr i32 [[X_ADDR_05]], 1
429 ; LZCNT-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[SHR]], 0
447 ; NOLZCNT-NEXT: [[X_ADDR_05:%.*]] = phi i32 [ [[X]], [[WHILE_BODY_LR_PH]] ], [ [[SHR:%.*]], [[WH…
448 ; NOLZCNT-NEXT: [[SHR]] = ashr i32 [[X_ADDR_05]], 1
451 ; NOLZCNT-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[SHR]], 0
505 ; ALL-NEXT: [[N_ADDR_0:%.*]] = phi i32 [ [[N:%.*]], [[ENTRY:%.*]] ], [ [[SHR:%.*]], [[WHILE_COND…
507 ; ALL-NEXT: [[SHR]] = lshr i32 [[N_ADDR_0]], 1
508 ; ALL-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[SHR]], 0
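
Note: the ctlz.ll checks above match the classic shift-until-zero loop that LoopIdiom can rewrite into a count-leading-zeros intrinsic when LZCNT is available. The source-level idiom looks roughly like this (a sketch of the pattern, not the test's exact input):

/* Count how many 1-bit shifts it takes to clear x; for x != 0 this is
 * 32 - clz(x), which is why the pass can replace the loop with ctlz. */
unsigned shift_count(unsigned x) {
    unsigned n = 0;
    while (x != 0) {
        x >>= 1;   /* the lshr/ashr by 1 in the [[SHR]] checks */
        n++;
    }
    return n;
}
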
/external/openssh/
blocks.c
40 #define SHR(x,c) ((x) >> (c)) macro
47 #define sigma0(x) (ROTR(x, 1) ^ ROTR(x, 8) ^ SHR(x,7))
48 #define sigma1(x) (ROTR(x,19) ^ ROTR(x,61) ^ SHR(x,6))
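
Note: blocks.c pairs SHR with ROTR to build the SHA-512 message-schedule functions shown on lines 47 and 48. A self-contained restatement of those two helpers on 64-bit words:

#include <stdint.h>

/* 64-bit rotate right and logical shift right, as in blocks.c. */
#define ROTR(x, c) (((x) >> (c)) | ((x) << (64 - (c))))
#define SHR(x, c)  ((x) >> (c))

/* SHA-512 small-sigma message schedule functions (FIPS 180-4). */
static uint64_t sigma0(uint64_t x) { return ROTR(x, 1)  ^ ROTR(x, 8)  ^ SHR(x, 7); }
static uint64_t sigma1(uint64_t x) { return ROTR(x, 19) ^ ROTR(x, 61) ^ SHR(x, 6); }
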
/external/llvm/test/CodeGen/AArch64/
bitfield-extract.ll
84 ; SHR with multiple uses is fine as SXTH and SBFX are both aliases of SBFM.
85 ; However, allowing the transformation means the SHR and SBFX can execute in
/external/skia/src/sksl/
SkSLLexer.h
140 #undef SHR
141 SHR, enumerator
/external/skqp/src/sksl/
SkSLLexer.h
144 #undef SHR
145 SHR, enumerator
