/system/core/libpixelflinger/include/private/pixelflinger/

D | ggl_fixed.h | shift is an argument of gglMulx() and gglMulAddx()
    110  inline GGLfixed gglMulx(GGLfixed x, GGLfixed y, int shift) CONST;
    111  inline GGLfixed gglMulx(GGLfixed x, GGLfixed y, int shift) {
    113      if (__builtin_constant_p(shift)) {
    118      : "%[x]"(x), [y]"r"(y), [lshift] "I"(32-shift), [rshift] "I"(shift)
    126      : "%[x]"(x), [y]"r"(y), [lshift] "r"(32-shift), [rshift] "r"(shift)
    133  inline GGLfixed gglMulAddx(GGLfixed x, GGLfixed y, GGLfixed a, int shift) CONST;
    134  inline GGLfixed gglMulAddx(GGLfixed x, GGLfixed y, GGLfixed a, int shift) {
    136      if (__builtin_constant_p(shift)) {
    141      : "%[x]"(x), [y]"r"(y), [a]"r"(a), [lshift] "I"(32-shift), [rshift] "I"(shift)
    148      : "%[x]"(x), [y]"r"(y), [a]"r"(a), [lshift] "r"(32-shift), [rshift] "r"(shift)
    [all …]

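The asm constraints switch between "I" (compile-time-constant shift) and "r" (register shift) depending on __builtin_constant_p(shift). The intended arithmetic is pinned down by the expected-value formulas in gglmul_test.cpp further down this listing; a minimal portable sketch of those semantics follows (gglMulx_ref and gglMulAddx_ref are hypothetical names, not the shipped ARM implementations):

    #include <cstdint>

    typedef int32_t GGLfixed;   // pixelflinger's 16.16 fixed-point type

    // Round-to-nearest multiply-shift, per gglmul_test.cpp (assumes shift >= 1).
    GGLfixed gglMulx_ref(GGLfixed x, GGLfixed y, int shift) {
        return GGLfixed(((int64_t)x * y + (INT64_C(1) << (shift - 1))) >> shift);
    }

    // Truncating multiply-shift plus addend; note there is no rounding term.
    GGLfixed gglMulAddx_ref(GGLfixed x, GGLfixed y, GGLfixed a, int shift) {
        return GGLfixed((((int64_t)x * y) >> shift) + a);
    }
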
/system/core/libpixelflinger/arch-mips/

D | t32cb16blend.S | shift is an argument of the pixel macro
    36   .macro pixel dreg src fb shift
    52   ext $t8,\dreg,\shift+6+5,5        # dst[\shift:15..11]
    54   ext $t0,\dreg,\shift+5,6          # start green extraction dst[\shift:10..5]
    58   .if \shift!=0
    59   sll $t8,\shift+11
    67   ext $t0,\dreg,\shift,5            # start blue extraction dst[\shift:4..0]
    74   sll $t8, $t8, \shift+5
    79   sll $t8, $t8, \shift
    94   .macro pixel dreg src fb shift
    119  srl $t8,\dreg,\shift+6+5
    [all …]

D | col32cb16blend.S | shift is an argument of the pixel macro
    17   .macro pixel dreg src f sR sG sB shift
    21   ext $t4,\src,\shift+11,5
    25   ext $t5,\src,\shift+5,6
    29   ext $t6,\src,\shift,5
    33   srl $t4,\src,\shift+11
    38   srl $t5,\src,\shift+5
    43   srl $t6,\src,\shift

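Both macros pick apart RGB565 pixels at a variable bit offset: \shift selects which 16-bit pixel of a packed word is being worked on (0 for the low halfword, 16 for the high one), and the ext/srl arguments follow the 5/6/5 field layout noted in the comments. A C++ model of the extraction (unpack565 is an illustrative helper, not taken from the .S files):

    #include <cstdint>

    // Unpack the RGB565 pixel that starts at bit `shift` of a packed word.
    struct RGB565 { uint32_t r, g, b; };

    static inline RGB565 unpack565(uint32_t word, unsigned shift) {
        RGB565 p;
        p.r = (word >> (shift + 11)) & 0x1F;  // bits shift+15 .. shift+11
        p.g = (word >> (shift + 5))  & 0x3F;  // bits shift+10 .. shift+5
        p.b = (word >>  shift)       & 0x1F;  // bits shift+4  .. shift
        return p;
    }
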
/system/core/libpixelflinger/arch-mips64/

D | t32cb16blend.S | shift is an argument of the pixel macro
    34   .macro pixel dreg src fb shift
    46   ext $t8,\dreg,\shift+6+5,5        # dst[\shift:15..11]
    48   ext $a4,\dreg,\shift+5,6          # start green extraction dst[\shift:10..5]
    52   .if \shift!=0
    53   sll $t8,\shift+11                 # dst[\shift:15..11]
    61   ext $a4,\dreg,\shift,5            # start blue extraction dst[\shift:4..0]
    68   sll $t8, $t8, \shift+5            # finish green insertion dst[\shift:10..5]
    73   sll $t8, $t8, \shift

D | col32cb16blend.S | shift is an argument of the pixel macro
    17   .macro pixel dreg src f sR sG sB shift
    20   .if \shift < 32
    21   dext $t0,\src,\shift+11,5
    23   dextu $t0,\src,\shift+11,5
    28   .if \shift < 32
    29   dext $t1,\src,\shift+5,6
    31   dextu $t1,\src,\shift+5,6
    36   .if \shift < 32
    37   dext $t2,\src,\shift,5
    39   dextu $t2,\src,\shift,5

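The notable difference from the 32-bit version is the .if \shift < 32 split: MIPS64's dext can only encode a field whose starting bit is 0..31, while dextu covers starting bits 32..63. The extracted value is the same either way, as in this illustrative C++ equivalent (extract_field is a hypothetical helper):

    #include <cstdint>

    // One expression covers both dext (pos < 32) and dextu (pos >= 32).
    static inline uint64_t extract_field(uint64_t src, unsigned pos, unsigned size) {
        return (src >> pos) & ((UINT64_C(1) << size) - 1);
    }

    // Per the macro: r = extract_field(src, shift + 11, 5);
    //                g = extract_field(src, shift + 5,  6);
    //                b = extract_field(src, shift,      5);
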
/system/core/libpixelflinger/tests/gglmul/

D | gglmul_test.cpp | shift is a member of the test-vector structs
    104  int shift;                                                    (member)
    in gglMulx_test():
      130      test->x, test->y, test->shift);
      131  actual = gglMulx(test->x, test->y, test->shift);
      133      ((int64_t)test->x * test->y + (1 << (test->shift-1))) >> test->shift;
    146  int shift;                                                    (member)
    in gglMulAddx_test():
      173      test->x, test->y, test->shift, test->a);
      174  actual = gglMulAddx(test->x, test->y, test->a, test->shift);
      175  expected = (((int64_t)test->x * test->y) >> test->shift) + test->a;
    189  int shift;                                                    (member)
    in gglMulSubx_test():
      216      test->x, test->y, test->shift, test->a);
    [all …]

/system/core/libpixelflinger/codeflinger/

D | Arm64Assembler.cpp
    in dataProcessingCommon() (shift: local):
      374  uint32_t shift;
      380  shift = mAddrMode.reg_imm_type;
      386  shift = 0;
      398  case opADD: *mPC++ = A64_ADD_W(Rd, Rn, Rm, shift, amount); break;
      399  case opAND: *mPC++ = A64_AND_W(Rd, Rn, Rm, shift, amount); break;
      400  case opORR: *mPC++ = A64_ORR_W(Rd, Rn, Rm, shift, amount); break;
      401  case opMVN: *mPC++ = A64_ORN_W(Rd, Rn, Rm, shift, amount); break;
      402  case opSUB: *mPC++ = A64_SUB_W(Rd, Rn, Rm, shift, amount, s); break;
    in reg_imm() (shift: argument):
      885  uint32_t ArmToArm64Assembler::reg_imm(int Rm, int type, uint32_t shift)
      889      mAddrMode.reg_imm_shift = shift;
    [all …]

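Here shift carries the shift type (copied from mAddrMode.reg_imm_type) and amount the shift distance, both forwarded to encoders such as A64_ADD_W, whose bodies are not part of this listing. For orientation, the 32-bit AArch64 ADD (shifted register) instruction packs these fields as sketched below; this is the architectural layout, assumed rather than quoted from Arm64Assembler.cpp:

    #include <cstdint>

    // AArch64 ADD (shifted register), 32-bit variant; an A64_ADD_W-style
    // encoder is assumed to emit a word of this shape.
    static uint32_t a64_add_w(uint32_t Rd, uint32_t Rn, uint32_t Rm,
                              uint32_t shift,    // 0=LSL, 1=LSR, 2=ASR
                              uint32_t amount) { // shift distance, 0..31
        return 0x0B000000                 // opcode bits for ADD Wd, Wn, Wm
             | ((shift  & 0x3)  << 22)    // shift type
             | ((Rm     & 0x1F) << 16)
             | ((amount & 0x3F) << 10)    // imm6
             | ((Rn     & 0x1F) << 5)
             |  (Rd     & 0x1F);
    }
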
D | Arm64Assembler.h | shift in the addressing-mode and A64 encoder prototypes
    83   virtual uint32_t reg_imm(int Rm, int type, uint32_t shift);
    90   virtual uint32_t reg_scale_pre(int Rm, int type=0, uint32_t shift=0, int W=0);
    91   virtual uint32_t reg_scale_post(int Rm, int type=0, uint32_t shift=0);
    210      uint32_t imm, uint32_t shift = 0);
    212      uint32_t imm, uint32_t shift = 0);
    215      uint32_t Rm, uint32_t shift = 0, uint32_t amount = 0);
    217      uint32_t shift = 0, uint32_t amount = 0);
    219      uint32_t shift = 0, uint32_t amount = 0,
    222      uint32_t Rm, uint32_t shift = 0, uint32_t amount = 0);
    224      uint32_t Rm, uint32_t shift = 0, uint32_t amount = 0);
    [all …]

D | load_store.cpp
    in downshift() (shift: local):
      332  const int shift = (GGL_DITHER_BITS - (sbits-dbits));
      333  if (shift>0)      ADD(AL, 0, ireg, ireg, reg_imm(dither.reg, LSR, shift));
      334  else if (shift<0) ADD(AL, 0, ireg, ireg, reg_imm(dither.reg, LSL,-shift));
      340  int shift = sh-dbits;
      342  MOV(AL, 0, ireg, reg_imm(s.reg, LSR, shift));
      350  MOV(AL, 0, d.reg, reg_imm(s.reg, LSR, shift));
      352  ORR(AL, 0, d.reg, d.reg, reg_imm(s.reg, LSR, shift));
      356  int shift = sh-dh;
      357  if (shift>0) {
      359  MOV(AL, 0, d.reg, reg_imm(s.reg, LSR, shift));
      [all …]

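downshift() emits instructions rather than computing values directly: the ADD at lines 333-334 folds the dither value, scaled into the bits about to be discarded, into the component before the narrowing shift. A C model of the arithmetic the emitted code performs, under the assumption that a later LSR does the final sbits-to-dbits narrowing (ditherBits stands in for GGL_DITHER_BITS; downshift_dither is a hypothetical helper):

    #include <cstdint>

    // Model of the emitted sequence: add dither aligned to the bits that the
    // final right shift will drop, then narrow from sbits to dbits.
    static uint32_t downshift_dither(uint32_t v, int sbits, int dbits,
                                     uint32_t dither, int ditherBits) {
        const int shift = ditherBits - (sbits - dbits);
        if (shift > 0)      v += dither >> shift;
        else if (shift < 0) v += dither << (-shift);
        else                v += dither;
        return v >> (sbits - dbits);
    }
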
D | texturing.cpp
    in build_textures() (shift: local):
      511  const int shift = 31 - gglClz(tmu.format.size);
      537  MOV(LT, 0, width, imm(1 << shift));
      538  if (shift)
      539      MOV(GE, 0, width, reg_imm(width, LSL, shift));
      558  MOV(GT, 0, width, imm(1 << shift));
      572  MOV(LT, 0, height, imm(1 << shift));
      573  if (shift)
      574      MOV(GE, 0, height, reg_imm(height, LSL, shift));
      582  if (shift) {
      583      MOV(GT, 0, height, reg_imm(stride, LSL, shift));
      [all …]

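31 - gglClz(tmu.format.size) is floor(log2) of the texel size in bytes, which lets the generated code scale texel counts to byte offsets with LSL shift instead of a multiply. A one-line C++ equivalent (log2i is an illustrative name; gglClz corresponds to __builtin_clz):

    #include <cstdint>

    // Index of the highest set bit, i.e. floor(log2(v)) for v > 0.
    static inline int log2i(uint32_t v) {
        return 31 - __builtin_clz(v);
    }

    // e.g. for a 2-byte RGB565 texel: byteOffset = texelIndex << log2i(2);
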
D | blending.cpp
    in build_blendFOneMinusF() (shift: local):
      446  const int shift = fragment.size() - fb.size();
      447  if (shift>0)      RSB(AL, 0, diff.reg, fb.reg, reg_imm(fragment.reg, LSR, shift));
      448  else if (shift<0) RSB(AL, 0, diff.reg, fb.reg, reg_imm(fragment.reg, LSL,-shift));
    in build_blendOneMinusFF() (shift: local):
      464  const int shift = fragment.size() - fb.size();
      465  if (shift>0)      SUB(AL, 0, diff.reg, fb.reg, reg_imm(fragment.reg, LSR, shift));
      466  else if (shift<0) SUB(AL, 0, diff.reg, fb.reg, reg_imm(fragment.reg, LSL,-shift));
    in component_add() (shift: local):
      639  const int shift = src.size() - dst.size();
      640  if (!shift) {
      643      ADD(AL, 0, d.reg, src.reg, reg_imm(dst.reg, LSL, shift));
      650  d.l = shift;

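All three locals serve the same purpose: fragment and framebuffer components can have different bit widths, so one operand is shifted until the most significant bits line up before the subtract or add. Schematically (align_component is an illustrative helper, not from blending.cpp):

    #include <cstdint>

    // Align a component that has sbits of precision to dbits before
    // combining it with a component of that width.
    static inline uint32_t align_component(uint32_t v, int sbits, int dbits) {
        const int shift = sbits - dbits;
        if (shift > 0) return v >> shift;   // wider source: drop low bits
        if (shift < 0) return v << -shift;  // narrower source: scale up
        return v;
    }
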
D | ARMAssemblerProxy.cpp | shift is an argument, forwarded to mTarget
    93   uint32_t ARMAssemblerProxy::reg_imm(int Rm, int type, uint32_t shift)
    95       return mTarget->reg_imm(Rm, type, shift);
    122  uint32_t ARMAssemblerProxy::reg_scale_pre(int Rm, int type, uint32_t shift, int W)
    124      return mTarget->reg_scale_pre(Rm, type, shift, W);
    127  uint32_t ARMAssemblerProxy::reg_scale_post(int Rm, int type, uint32_t shift)
    129      return mTarget->reg_scale_post(Rm, type, shift);

D | ARMAssemblerProxy.h | shift in the proxied prototypes
    59   virtual uint32_t reg_imm(int Rm, int type, uint32_t shift);
    68   virtual uint32_t reg_scale_pre(int Rm, int type=0, uint32_t shift=0, int W=0);
    69   virtual uint32_t reg_scale_post(int Rm, int type=0, uint32_t shift=0);

D | ARMAssembler.h | shift in the ARM backend prototypes
    70   virtual uint32_t reg_imm(int Rm, int type, uint32_t shift);
    79   virtual uint32_t reg_scale_pre(int Rm, int type=0, uint32_t shift=0, int W=0);
    80   virtual uint32_t reg_scale_post(int Rm, int type=0, uint32_t shift=0);

D | GGLAssembler.cpp
    in build_incoming_component() (shift: local):
      569  const int shift = fragment.h <= 8 ? 0 : fragment.h-8;
      574  if (shift) {
      576      reg_imm(mAlphaSource.reg, LSR, shift));
      583  if (shift) {
      585      reg_imm(fragment.reg, LSR, shift));
      590  mAlphaSource.s -= shift;
    in build_alpha_test() (shift: local):
      703  const int shift = GGL_COLOR_BITS-fragment.size();
      705  if (shift) CMP(AL, fragment.reg, reg_imm(ref, LSR, shift));
    in build_and_immediate() (shift: local):
      919  int shift = rot + bitpos;
      923  int32_t newMask = (m<<shift) | (m>>(32-shift));

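The newMask expression at line 923 is a 32-bit rotate-left, used while searching for a rotation that turns the mask into an encodable ARM immediate. As written it would be undefined for shift == 0 (m >> 32), a case the search presumably never produces; a fully defined generic form masks both shift counts (rotl32 is an illustrative helper):

    #include <cstdint>

    // 32-bit rotate-left that stays well defined for s == 0 (and s == 32).
    static inline uint32_t rotl32(uint32_t m, unsigned s) {
        return (m << (s & 31)) | (m >> ((32 - s) & 31));
    }
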
D | ARMAssembler.cpp
    in reg_imm() (shift: argument):
      510  uint32_t ARMAssembler::reg_imm(int Rm, int type, uint32_t shift)
      512      return ((shift&0x1F)<<7) | ((type&0x3)<<5) | (Rm&0xF);
    in reg_scale_pre() (shift: argument):
      546      uint32_t shift, int W)
      550      reg_imm(abs(Rm), type, shift);
    in reg_scale_post() (shift: argument):
      553  uint32_t ARMAssembler::reg_scale_post(int Rm, int type, uint32_t shift)
      555      return (1<<25) | (((uint32_t(Rm)>>31)^1)<<23) | reg_imm(abs(Rm), type, shift);

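reg_imm() is where the ARM backend actually encodes a shifted-register operand: bits 11..7 hold the immediate shift amount, bits 6..5 the shift type, and bits 3..0 the register, matching the classic ARM data-processing operand layout. The inverse, for reading such an operand back (decode_shifter is an illustrative helper):

    #include <cstdint>

    // Decode the shifter operand produced by ARMAssembler::reg_imm().
    struct ShifterOperand { int Rm; int type; uint32_t shift; };

    static inline ShifterOperand decode_shifter(uint32_t op) {
        return ShifterOperand{
            int(op & 0xF),         // bits 3..0: Rm
            int((op >> 5) & 0x3),  // bits 6..5: LSL=0, LSR=1, ASR=2, ROR=3
            (op >> 7) & 0x1F       // bits 11..7: immediate shift amount
        };
    }
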
D | MIPS64Assembler.h | shift in the MIPS64 backend prototypes
    73   virtual uint32_t reg_imm(int Rm, int type, uint32_t shift);
    82   virtual uint32_t reg_scale_pre(int Rm, int type=0, uint32_t shift=0, int W=0);
    83   virtual uint32_t reg_scale_post(int Rm, int type=0, uint32_t shift=0);

D | ARMAssemblerInterface.h | shift in the pure-virtual interface
    81   virtual uint32_t reg_imm(int Rm, int type, uint32_t shift) = 0;
    90   virtual uint32_t reg_scale_pre(int Rm, int type=0, uint32_t shift=0, int W=0) = 0;
    91   virtual uint32_t reg_scale_post(int Rm, int type=0, uint32_t shift=0) = 0;

D | MIPSAssembler.h | shift in the MIPS backend prototypes
    68   virtual uint32_t reg_imm(int Rm, int type, uint32_t shift);
    77   virtual uint32_t reg_scale_pre(int Rm, int type=0, uint32_t shift=0, int W=0);
    78   virtual uint32_t reg_scale_post(int Rm, int type=0, uint32_t shift=0);

D | MIPS64Assembler.cpp
    in reg_imm() (shift: argument):
      227  uint32_t ArmToMips64Assembler::reg_imm(int Rm, int type, uint32_t shift)
      231      amode.value = shift;
    in reg_scale_pre() (shift: argument):
      271      uint32_t shift, int W)
      273      LOG_ALWAYS_FATAL_IF(W | type | shift, "reg_scale_pre adv modes not yet implemented");
    in reg_scale_post() (shift: argument):
      282  uint32_t ArmToMips64Assembler::reg_scale_post(int Rm, int type, uint32_t shift)

D | MIPSAssembler.cpp
    in reg_imm() (shift: argument):
      234  uint32_t ArmToMipsAssembler::reg_imm(int Rm, int type, uint32_t shift)
      238      amode.value = shift;
    in reg_scale_pre() (shift: argument):
      278      uint32_t shift, int W)
      280      LOG_ALWAYS_FATAL_IF(W | type | shift, "reg_scale_pre adv modes not yet implemented");
    in reg_scale_post() (shift: argument):
      289  uint32_t ArmToMipsAssembler::reg_scale_post(int Rm, int type, uint32_t shift)

/system/core/libpixelflinger/

D | buffer.cpp
    in downshift_component() (shift: local):
      338  const int shift = (GGL_DITHER_BITS - (sbits-dbits));
      339  if (shift >= 0) v += (dither >> shift) << sl;
      340  else            v += (dither << (-shift)) << sl;
      369  int shift = dh-sh;
      370  in |= v<<shift;
      375  int shift = sh-dh;
      376  if (shift > 0)      in |= v>>shift;
      377  else if (shift < 0) in |= v<<(-shift);

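A worked instance of lines 338-340, assuming pixelflinger's dither kernel values are 6 bits wide (GGL_DITHER_BITS == 6 is an assumption here, not shown in this listing): narrowing an 8-bit component to 5 bits gives shift = 6 - (8 - 5) = 3, so the code adds (dither >> 3) << sl, i.e. the top three bits of the dither value land exactly in the three low bits that the later downshift discards.
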
D | fixed.cpp
    in gglRecipQ() (shift: local):
      62   int shift;
      63   x = gglRecipQNormalized(x, &shift);
      64   shift += 16-q;
      65   if (shift > 0)
      66       x += 1L << (shift-1); // rounding
      67   x >>= shift;

D | trap.cpp
    in edge_setup() (shift: local):
      696   const int shift = TRI_ITERATORS_BITS - TRI_FRACTION_BITS;
      701   edge->x = (x1 << shift) + (1LU << (TRI_ITERATORS_BITS-1));
    in aa_edge_setup() (shift: local):
      882   const int shift = FIXED_BITS - TRI_FRACTION_BITS;
      885   edge->x = x1 << shift;
    in aapolyx() (shift: local):
      1002  const int32_t shift = TRI_FRACTION_BITS + TRI_ITERATORS_BITS - FIXED_BITS;
      1006  GGLfixed l_min = gglMulAddx(left->x_incr, y - left->y_top, left->x, shift);
      1013  GGLfixed r_min = gglMulAddx(right->x_incr, y - right->y_top, right->x, shift);

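trap.cpp juggles three fixed-point formats (TRI_FRACTION_BITS for sub-pixel vertex coordinates, TRI_ITERATORS_BITS for edge iterators, FIXED_BITS for GGLfixed), and each shift local is simply the difference in fraction bits between two of them; the + (1LU << (TRI_ITERATORS_BITS-1)) term at line 701 adds 0.5 in the iterator format. The general conversion (rescale is an illustrative helper, assuming non-negative values for the left shift):

    #include <cstdint>

    // Rescale a fixed-point value between formats with different fraction bits.
    static inline int64_t rescale(int64_t v, int fromFrac, int toFrac) {
        const int shift = toFrac - fromFrac;
        return shift >= 0 ? (v << shift) : (v >> -shift);
    }

    // Line 701 restated: edge->x = rescale(x1, TRI_FRACTION_BITS, TRI_ITERATORS_BITS)
    //                             + (INT64_C(1) << (TRI_ITERATORS_BITS - 1));  // + 0.5
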
/system/connectivity/shill/bin/

D | ff_debug | shift here is the POSIX shell builtin, not a variable
    164  shift   # move forward to the <level> argument if specified
    180  shift