/system/core/libpixelflinger/include/private/pixelflinger/ |
D | ggl_fixed.h |
    109  inline GGLfixed gglMulx(GGLfixed x, GGLfixed y, int shift) CONST;
    110  __attribute__((always_inline)) inline GGLfixed gglMulx(GGLfixed x, GGLfixed y, int shift) {  in gglMulx() argument
    112  if (__builtin_constant_p(shift)) {  in gglMulx()
    117  : "%[x]"(x), [y]"r"(y), [lshift] "I"(32-shift), [rshift] "I"(shift)  in gglMulx()
    125  : "%[x]"(x), [y]"r"(y), [lshift] "r"(32-shift), [rshift] "r"(shift)  in gglMulx()
    132  inline GGLfixed gglMulAddx(GGLfixed x, GGLfixed y, GGLfixed a, int shift) CONST;
    134  int shift) {  in gglMulAddx() argument
    136  if (__builtin_constant_p(shift)) {  in gglMulAddx()
    141  : "%[x]"(x), [y]"r"(y), [a]"r"(a), [lshift] "I"(32-shift), [rshift] "I"(shift)  in gglMulAddx()
    148  : "%[x]"(x), [y]"r"(y), [a]"r"(a), [lshift] "r"(32-shift), [rshift] "r"(shift)  in gglMulAddx()
    [all …]
|
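gglMulx() above is libpixelflinger's fixed-point multiply: it forms a 64-bit product, rounds, and shifts the result back down by `shift` bits, using inline assembly with either an immediate ("I") or register ("r") shift count depending on whether `shift` is a compile-time constant. A portable sketch of the same arithmetic (hypothetical helper name, not the pixelflinger implementation; assumes 1 <= shift <= 32, since shift == 0 would make the rounding term undefined):

    #include <cstdint>

    // Reference behaviour of a rounded fixed-point multiply, matching the
    // expected-value formula used by the gglmul tests further down.
    static inline int32_t mulx_reference(int32_t x, int32_t y, int shift) {
        int64_t product = static_cast<int64_t>(x) * y;      // full 64-bit product
        product += static_cast<int64_t>(1) << (shift - 1);  // +0.5 ulp for rounding
        return static_cast<int32_t>(product >> shift);      // drop the fraction bits
    }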
/system/core/libpixelflinger/arch-mips/ |
D | t32cb16blend.S |
    36  .macro pixel dreg src fb shift    argument
    52  ext $t8,\dreg,\shift+6+5,5 # dst[\shift:15..11]
    54  ext $t0,\dreg,\shift+5,6 # start green extraction dst[\shift:10..5]
    58  .if \shift!=0
    59  sll $t8,\shift+11
    67  ext $t0,\dreg,\shift,5 # start blue extraction dst[\shift:4..0]
    74  sll $t8, $t8, \shift+5
    79  sll $t8, $t8, \shift
    94  .macro pixel dreg src fb shift    argument
    119  srl $t8,\dreg,\shift+6+5
    [all …]
|
D | col32cb16blend.S |
    17  .macro pixel dreg src f sR sG sB shift    argument
    21  ext $t4,\src,\shift+11,5
    25  ext $t5,\src,\shift+5,6
    29  ext $t6,\src,\shift,5
    33  srl $t4,\src,\shift+11
    38  srl $t5,\src,\shift+5
    43  srl $t6,\src,\shift
|
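The `pixel` macros above unpack one RGB565 pixel whose low bit sits at bit offset \shift of a register (typically 0 or 16 when two 16-bit pixels share a 32-bit word): 5 bits of red at \shift+11, 6 bits of green at \shift+5, 5 bits of blue at \shift. The same extraction written out in C++, for orientation only (names are made up):

    #include <cstdint>

    struct Rgb565 { uint32_t r, g, b; };

    // Pull the 5-6-5 colour fields of an RGB565 pixel that starts at bit
    // offset `shift` inside a wider word, mirroring the ext/srl sequences
    // in the .S macros above.
    static inline Rgb565 extract_rgb565(uint32_t packed, int shift) {
        Rgb565 c;
        c.r = (packed >> (shift + 11)) & 0x1F;  // dst[shift+15 .. shift+11]
        c.g = (packed >> (shift + 5))  & 0x3F;  // dst[shift+10 .. shift+5]
        c.b = (packed >>  shift)       & 0x1F;  // dst[shift+4  .. shift+0]
        return c;
    }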
/system/netd/libnetdutils/include/netdutils/ |
D | Math.h |
    27  inline constexpr const T mask(const int shift) {  in mask() argument
    28  return (1 << shift) - 1;  in mask()
    33  inline constexpr const T align(const T& x, const int shift) {  in align() argument
    34  return (x + mask<T>(shift)) & ~mask<T>(shift);  in align()
|
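netdutils' mask() and align() implement power-of-two alignment: mask<T>(shift) is a value with the low `shift` bits set, and align() rounds x up to the next multiple of 2^shift by adding that mask and then clearing those bits. A standalone sketch of the same idiom, renamed to avoid clashing with the real header:

    #include <cassert>
    #include <cstdint>

    // Value with the low `shift` bits set, e.g. low_mask<uint32_t>(3) == 0b111.
    template <typename T>
    constexpr T low_mask(int shift) {
        return (T(1) << shift) - 1;
    }

    // Round x up to the next multiple of 2^shift.
    template <typename T>
    constexpr T align_up(T x, int shift) {
        return (x + low_mask<T>(shift)) & ~low_mask<T>(shift);
    }

    int main() {
        assert(align_up<uint32_t>(13, 3) == 16);  // round up to 8-byte boundary
        assert(align_up<uint32_t>(16, 3) == 16);  // already aligned
        return 0;
    }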
/system/core/libpixelflinger/arch-mips64/ |
D | t32cb16blend.S |
    34  .macro pixel dreg src fb shift    argument
    46  ext $t8,\dreg,\shift+6+5,5 # dst[\shift:15..11]
    48  ext $a4,\dreg,\shift+5,6 # start green extraction dst[\shift:10..5]
    52  .if \shift!=0
    53  sll $t8,\shift+11 # dst[\shift:15..11]
    61  ext $a4,\dreg,\shift,5 # start blue extraction dst[\shift:4..0]
    68  sll $t8, $t8, \shift+5 # finish green insertion dst[\shift:10..5]
    73  sll $t8, $t8, \shift
|
D | col32cb16blend.S |
    17  .macro pixel dreg src f sR sG sB shift    argument
    20  .if \shift < 32
    21  dext $t0,\src,\shift+11,5
    23  dextu $t0,\src,\shift+11,5
    28  .if \shift < 32
    29  dext $t1,\src,\shift+5,6
    31  dextu $t1,\src,\shift+5,6
    36  .if \shift < 32
    37  dext $t2,\src,\shift,5
    39  dextu $t2,\src,\shift,5
|
/system/core/libpixelflinger/tests/gglmul/ |
D | gglmul_test.cpp |
    104  int shift;  member
    130  test->x, test->y, test->shift);  in gglMulx_test()
    131  actual = gglMulx(test->x, test->y, test->shift);  in gglMulx_test()
    133  ((int64_t)test->x * test->y + (1 << (test->shift-1))) >> test->shift;  in gglMulx_test()
    146  int shift;  member
    173  test->x, test->y, test->shift, test->a);  in gglMulAddx_test()
    174  actual = gglMulAddx(test->x, test->y,test->a, test->shift);  in gglMulAddx_test()
    175  expected = (((int64_t)test->x * test->y) >> test->shift) + test->a;  in gglMulAddx_test()
    189  int shift;  member
    216  test->x, test->y, test->shift, test->a);  in gglMulSubx_test()
    [all …]
|
/system/core/libpixelflinger/codeflinger/ |
D | Arm64Assembler.cpp |
    373  uint32_t shift;  in dataProcessingCommon() local
    379  shift = mAddrMode.reg_imm_type;  in dataProcessingCommon()
    385  shift = 0;  in dataProcessingCommon()
    397  case opADD: *mPC++ = A64_ADD_W(Rd, Rn, Rm, shift, amount); break;  in dataProcessingCommon()
    398  case opAND: *mPC++ = A64_AND_W(Rd, Rn, Rm, shift, amount); break;  in dataProcessingCommon()
    399  case opORR: *mPC++ = A64_ORR_W(Rd, Rn, Rm, shift, amount); break;  in dataProcessingCommon()
    400  case opMVN: *mPC++ = A64_ORN_W(Rd, Rn, Rm, shift, amount); break;  in dataProcessingCommon()
    401  case opSUB: *mPC++ = A64_SUB_W(Rd, Rn, Rm, shift, amount, s);break;  in dataProcessingCommon()
    884  uint32_t ArmToArm64Assembler::reg_imm(int Rm, int type, uint32_t shift)  in reg_imm() argument
    888  mAddrMode.reg_imm_shift = shift;  in reg_imm()
    [all …]
|
D | Arm64Assembler.h |
    83  virtual uint32_t reg_imm(int Rm, int type, uint32_t shift);
    90  virtual uint32_t reg_scale_pre(int Rm, int type=0, uint32_t shift=0, int W=0);
    91  virtual uint32_t reg_scale_post(int Rm, int type=0, uint32_t shift=0);
    210  uint32_t imm, uint32_t shift = 0);
    212  uint32_t imm, uint32_t shift = 0);
    215  uint32_t Rm, uint32_t shift = 0, uint32_t amount = 0);
    217  uint32_t shift = 0, uint32_t amount = 0);
    219  uint32_t shift = 0, uint32_t amount = 0,
    222  uint32_t Rm, uint32_t shift = 0, uint32_t amount = 0);
    224  uint32_t Rm, uint32_t shift = 0, uint32_t amount = 0);
    [all …]
|
D | load_store.cpp |
    335  const int shift = (GGL_DITHER_BITS - (sbits-dbits));  in downshift() local
    336  if (shift>0) ADD(AL, 0, ireg, ireg, reg_imm(dither.reg, LSR, shift));  in downshift()
    337  else if (shift<0) ADD(AL, 0, ireg, ireg, reg_imm(dither.reg, LSL,-shift));  in downshift()
    343  int shift = sh-dbits;  in downshift() local
    345  MOV(AL, 0, ireg, reg_imm(s.reg, LSR, shift));  in downshift()
    353  MOV(AL, 0, d.reg, reg_imm(s.reg, LSR, shift));  in downshift()
    355  ORR(AL, 0, d.reg, d.reg, reg_imm(s.reg, LSR, shift));  in downshift()
    359  int shift = sh-dh;  in downshift() local
    360  if (shift>0) {  in downshift()
    362  MOV(AL, 0, d.reg, reg_imm(s.reg, LSR, shift));  in downshift()
    [all …]
|
D | texturing.cpp |
    505  const int shift = 31 - gglClz(tmu.format.size);  in build_textures() local
    531  MOV(LT, 0, width, imm(1 << shift));  in build_textures()
    532  if (shift)  in build_textures()
    533  MOV(GE, 0, width, reg_imm(width, LSL, shift));  in build_textures()
    552  MOV(GT, 0, width, imm(1 << shift));  in build_textures()
    566  MOV(LT, 0, height, imm(1 << shift));  in build_textures()
    567  if (shift)  in build_textures()
    568  MOV(GE, 0, height, reg_imm(height, LSL, shift));  in build_textures()
    576  if (shift) {  in build_textures()
    577  MOV(GT, 0, height, reg_imm(stride, LSL, shift));  in build_textures()
    [all …]
|
D | blending.cpp |
    448  const int shift = fragment.size() - fb.size();  in build_blendFOneMinusF() local
    449  if (shift>0) RSB(AL, 0, diff.reg, fb.reg, reg_imm(fragment.reg, LSR, shift));  in build_blendFOneMinusF()
    450  else if (shift<0) RSB(AL, 0, diff.reg, fb.reg, reg_imm(fragment.reg, LSL,-shift));  in build_blendFOneMinusF()
    466  const int shift = fragment.size() - fb.size();  in build_blendOneMinusFF() local
    467  if (shift>0) SUB(AL, 0, diff.reg, fb.reg, reg_imm(fragment.reg, LSR, shift));  in build_blendOneMinusFF()
    468  else if (shift<0) SUB(AL, 0, diff.reg, fb.reg, reg_imm(fragment.reg, LSL,-shift));  in build_blendOneMinusFF()
    641  const int shift = src.size() - dst.size();  in component_add() local
    642  if (!shift) {  in component_add()
    645  ADD(AL, 0, d.reg, src.reg, reg_imm(dst.reg, LSL, shift));  in component_add()
    652  d.l = shift;  in component_add()
|
D | ARMAssemblerProxy.cpp |
    93  uint32_t ARMAssemblerProxy::reg_imm(int Rm, int type, uint32_t shift)  in reg_imm() argument
    95  return mTarget->reg_imm(Rm, type, shift);  in reg_imm()
    122  uint32_t ARMAssemblerProxy::reg_scale_pre(int Rm, int type, uint32_t shift, int W)  in reg_scale_pre() argument
    124  return mTarget->reg_scale_pre(Rm, type, shift, W);  in reg_scale_pre()
    127  uint32_t ARMAssemblerProxy::reg_scale_post(int Rm, int type, uint32_t shift)  in reg_scale_post() argument
    129  return mTarget->reg_scale_post(Rm, type, shift);  in reg_scale_post()
|
D | ARMAssembler.h |
    70  virtual uint32_t reg_imm(int Rm, int type, uint32_t shift);
    79  virtual uint32_t reg_scale_pre(int Rm, int type=0, uint32_t shift=0, int W=0);
    80  virtual uint32_t reg_scale_post(int Rm, int type=0, uint32_t shift=0);
|
D | ARMAssemblerProxy.h |
    59  virtual uint32_t reg_imm(int Rm, int type, uint32_t shift);
    68  virtual uint32_t reg_scale_pre(int Rm, int type=0, uint32_t shift=0, int W=0);
    69  virtual uint32_t reg_scale_post(int Rm, int type=0, uint32_t shift=0);
|
D | GGLAssembler.cpp |
    567  const int shift = fragment.h <= 8 ? 0 : fragment.h-8;  in build_incoming_component() local
    572  if (shift) {  in build_incoming_component()
    574  reg_imm(mAlphaSource.reg, LSR, shift));  in build_incoming_component()
    581  if (shift) {  in build_incoming_component()
    583  reg_imm(fragment.reg, LSR, shift));  in build_incoming_component()
    588  mAlphaSource.s -= shift;  in build_incoming_component()
    701  const int shift = GGL_COLOR_BITS-fragment.size();  in build_alpha_test() local
    703  if (shift) CMP(AL, fragment.reg, reg_imm(ref, LSR, shift));  in build_alpha_test()
    917  int shift = rot + bitpos;  in build_and_immediate() local
    921  int32_t newMask = (m<<shift) | (m>>(32-shift));  in build_and_immediate()
|
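In build_and_immediate() (lines 917 and 921 above), GGLAssembler tries to express a mask as an ARM rotated immediate, rotating the candidate left by `shift` bits with (m<<shift) | (m>>(32-shift)). As a reminder of the underlying operation, a plain rotate-left helper; the explicit guard is added here because a right shift by 32 is undefined in C++ when shift is 0:

    #include <cstdint>

    // Rotate a 32-bit value left by `shift` bits.
    static inline uint32_t rotl32(uint32_t m, int shift) {
        shift &= 31;
        if (shift == 0) return m;                   // avoid the undefined m >> 32
        return (m << shift) | (m >> (32 - shift));
    }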
D | ARMAssembler.cpp |
    496  uint32_t ARMAssembler::reg_imm(int Rm, int type, uint32_t shift)  in reg_imm() argument
    498  return ((shift&0x1F)<<7) | ((type&0x3)<<5) | (Rm&0xF);  in reg_imm()
    532  uint32_t shift, int W)  in reg_scale_pre() argument
    536  reg_imm(abs(Rm), type, shift);  in reg_scale_pre()
    539  uint32_t ARMAssembler::reg_scale_post(int Rm, int type, uint32_t shift)  in reg_scale_post() argument
    541  return (1<<25) | (((uint32_t(Rm)>>31)^1)<<23) | reg_imm(abs(Rm), type, shift);  in reg_scale_post()
|
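ARMAssembler::reg_imm() (line 498 above) packs the operand-2 field of an ARM data-processing instruction for "Rm, <type> #shift": the immediate shift amount goes in bits 11..7, the shift type (LSL/LSR/ASR/ROR) in bits 6..5, and the register number in bits 3..0. A commented restatement of that packing, with a hypothetical enum for the shift types:

    #include <cstdint>

    enum ShiftType { LSL = 0, LSR = 1, ASR = 2, ROR = 3 };  // ARM shift-type field

    // Encode "Rm, <type> #shift" as an ARM data-processing operand 2:
    // bits 11..7 = shift amount, bits 6..5 = shift type, bits 3..0 = Rm.
    static inline uint32_t shifter_operand(int Rm, ShiftType type, uint32_t shift) {
        return ((shift & 0x1F) << 7) | ((type & 0x3) << 5) | (Rm & 0xF);
    }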
D | MIPS64Assembler.h |
    73  virtual uint32_t reg_imm(int Rm, int type, uint32_t shift);
    82  virtual uint32_t reg_scale_pre(int Rm, int type=0, uint32_t shift=0, int W=0);
    83  virtual uint32_t reg_scale_post(int Rm, int type=0, uint32_t shift=0);
|
D | ARMAssemblerInterface.h |
    81  virtual uint32_t reg_imm(int Rm, int type, uint32_t shift) = 0;
    90  virtual uint32_t reg_scale_pre(int Rm, int type=0, uint32_t shift=0, int W=0) = 0;
    91  virtual uint32_t reg_scale_post(int Rm, int type=0, uint32_t shift=0) = 0;
|
/system/core/libunwindstack/ |
D | DwarfMemory.cpp |
    49  uint64_t shift = 0;  in ReadULEB128() local
    55  cur_value += static_cast<uint64_t>(byte & 0x7f) << shift;  in ReadULEB128()
    56  shift += 7;  in ReadULEB128()
    64  uint64_t shift = 0;  in ReadSLEB128() local
    70  cur_value += static_cast<uint64_t>(byte & 0x7f) << shift;  in ReadSLEB128()
    71  shift += 7;  in ReadSLEB128()
    75  cur_value |= static_cast<uint64_t>(-1) << shift;  in ReadSLEB128()
|
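ReadULEB128() and ReadSLEB128() decode DWARF's variable-length integers: each byte contributes 7 payload bits at an ever-larger `shift`, and the signed variant fills the remaining high bits with the sign once the terminating byte is seen (line 75). A self-contained sketch of both decoders, without the end-of-buffer and over-long-encoding checks the real DwarfMemory class performs:

    #include <cstddef>
    #include <cstdint>

    // Unsigned LEB128: 7 payload bits per byte, low bits first;
    // a clear top bit marks the last byte.
    uint64_t DecodeULEB128(const uint8_t* p, size_t* consumed) {
        uint64_t value = 0;
        uint64_t shift = 0;
        size_t i = 0;
        uint8_t byte;
        do {
            byte = p[i++];
            value |= static_cast<uint64_t>(byte & 0x7f) << shift;
            shift += 7;
        } while (byte & 0x80);
        *consumed = i;
        return value;
    }

    // Signed variant: after the last byte, sign-extend if its 0x40 bit was set.
    int64_t DecodeSLEB128(const uint8_t* p, size_t* consumed) {
        uint64_t value = 0;
        uint64_t shift = 0;
        size_t i = 0;
        uint8_t byte;
        do {
            byte = p[i++];
            value |= static_cast<uint64_t>(byte & 0x7f) << shift;
            shift += 7;
        } while (byte & 0x80);
        if (shift < 64 && (byte & 0x40)) {
            value |= static_cast<uint64_t>(-1) << shift;  // fill high bits with sign
        }
        *consumed = i;
        return static_cast<int64_t>(value);
    }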
/system/chre/apps/chqts/src/shared/ |
D | nano_string.cc |
    80  for (size_t i = 0, shift = 28; i < 8; i++, shift -= 4) {  in uint32ToHexAscii() local
    81  buffer[2 + i] = lookup[(value >> shift) & 0xF];  in uint32ToHexAscii()
|
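uint32ToHexAscii() emits the eight hex digits of a 32-bit value from the most significant nibble down, stepping `shift` from 28 to 0 in decrements of 4; the "2 +" offset in the original skips an "0x" prefix already written to the buffer. The same loop as standalone code (hypothetical name, illustrative only):

    #include <cstdint>
    #include <cstdio>

    // Write the eight hex digits of `value` into buf, most significant nibble first.
    void uint32_to_hex(uint32_t value, char buf[9]) {
        static const char lookup[] = "0123456789abcdef";
        for (size_t i = 0, shift = 28; i < 8; i++, shift -= 4) {
            buf[i] = lookup[(value >> shift) & 0xF];
        }
        buf[8] = '\0';
    }

    int main() {
        char buf[9];
        uint32_to_hex(0xDEADBEEF, buf);
        std::printf("%s\n", buf);  // prints "deadbeef"
        return 0;
    }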
/system/core/libpixelflinger/ |
D | buffer.cpp |
    343  const int shift = (GGL_DITHER_BITS - (sbits-dbits));  in downshift_component() local
    344  if (shift >= 0) v += (dither >> shift) << sl;  in downshift_component()
    345  else v += (dither << (-shift)) << sl;  in downshift_component()
    374  int shift = dh-sh;  in downshift_component() local
    375  in |= v<<shift;  in downshift_component()
    380  int shift = sh-dh;  in downshift_component() local
    381  if (shift > 0) in |= v>>shift;  in downshift_component()
    382  else if (shift < 0) in |= v<<shift;  in downshift_component()
|
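downshift_component() converts a colour component from a source width to a (usually narrower) destination width, optionally adding a dither value scaled to the bits the right shift will discard (line 343 computes that scale from GGL_DITHER_BITS). A much-simplified sketch of the narrowing-with-dither idea; the component is assumed to be isolated at bit 0, unlike the packed values in buffer.cpp, and the final clamp is added here for safety rather than taken from the original:

    #include <cstdint>

    constexpr int kDitherBits = 4;  // stand-in for GGL_DITHER_BITS

    // Narrow an sbits-wide component to dbits bits, adding dither scaled to
    // the (sbits - dbits) bits that will be discarded.
    static inline uint32_t narrow_component(uint32_t v, int sbits, int dbits,
                                            uint32_t dither) {
        const int discarded = sbits - dbits;
        const int shift = kDitherBits - discarded;
        if (shift >= 0) v += dither >> shift;      // dither wider than needed
        else            v += dither << (-shift);   // dither narrower than needed
        uint32_t out = v >> discarded;             // keep the top dbits bits
        const uint32_t maxv = (1u << dbits) - 1;
        return out > maxv ? maxv : out;            // clamp; added for this sketch
    }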
D | fixed.cpp |
    62  int shift;  in gglRecipQ() local
    63  x = gglRecipQNormalized(x, &shift);  in gglRecipQ()
    64  shift += 16-q;  in gglRecipQ()
    65  if (shift > 0)  in gglRecipQ()
    66  x += 1L << (shift-1); // rounding  in gglRecipQ()
    67  x >>= shift;  in gglRecipQ()
|
/system/extras/simpleperf/ |
D | utils.cpp |
    392  int shift = 0;  in ConvertBytesToValue() local
    395  result |= tmp << shift;  in ConvertBytesToValue()
    396  shift += 8;  in ConvertBytesToValue()
|
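ConvertBytesToValue() in simpleperf assembles a little-endian integer by OR-ing each byte in 8 bits higher than the previous one. A generic sketch of that pattern (hypothetical helper name):

    #include <cstddef>
    #include <cstdint>

    // Assemble a little-endian unsigned integer from `len` bytes (len <= 8),
    // OR-ing each byte in 8 bits above the previous one.
    static inline uint64_t bytes_to_value_le(const uint8_t* data, size_t len) {
        uint64_t result = 0;
        int shift = 0;
        for (size_t i = 0; i < len; ++i) {
            result |= static_cast<uint64_t>(data[i]) << shift;
            shift += 8;
        }
        return result;
    }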
/system/connectivity/wifilogd/ |
D | Android.bp | 59 "-Wno-shift-sign-overflow",
|