/external/neven/Embedded/common/src/b_TensorEm/
Flt16Alt2D.c
     45  bts_Flt16Vec2D_init( &ptrA->vecE );  in bts_Flt16Alt2D_init()
     53  bts_Flt16Vec2D_exit( &ptrA->vecE );  in bts_Flt16Alt2D_exit()
     71  bts_Flt16Vec2D_copy( &ptrA->vecE, &srcPtrA->vecE );  in bts_Flt16Alt2D_copy()
     79  if( ! bts_Flt16Vec2D_equal( &ptrA->vecE, &srcPtrA->vecE ) ) return FALSE;  in bts_Flt16Alt2D_equal()
    158  altL.vecE = bts_Flt16Vec2D_sub( *centerPtrA, bts_Flt16Mat2D_mapFlt( &altL.matE, centerPtrA ) );  in bts_Flt16Alt2D_createRotation()
    170  altL.vecE = bts_Flt16Vec2D_sub( *centerPtrA, bts_Flt16Mat2D_mapFlt( &altL.matE, centerPtrA ) );  in bts_Flt16Alt2D_createScale()
    183  altL.vecE = bts_Flt16Vec2D_sub( *centerPtrA, bts_Flt16Mat2D_mapFlt( &altL.matE, centerPtrA ) );  in bts_Flt16Alt2D_createRigid()
    219  altL.vecE = bts_Flt16Vec2D_add( altL.vecE, transL );  in bts_Flt16Alt2D_createRigidMap()
    237  altL.vecE = bts_Flt16Vec2D_create16( xA, yA, vecBbpA );  in bts_Flt16Alt2D_create16()
    254  altL.vecE = bts_Flt16Vec2D_create32( xA, yA, vecBbpA );  in bts_Flt16Alt2D_create32()
    [all …]

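The createRotation/createScale/createRigid lines above all compute the translation as the center minus the mapped center. A minimal plain-float sketch of that idea follows; the Mat2D/Vec2D/Alt2D types and the function name are illustrative stand-ins, not the library's Flt16 fixed-point structs.

#include <math.h>

typedef struct { float xx, xy, yx, yy; } Mat2D;   /* stand-in for matE */
typedef struct { float x, y; } Vec2D;             /* stand-in for vecE */
typedef struct { Mat2D mat; Vec2D vec; } Alt2D;   /* "alt" = matrix plus translation */

static Vec2D mapVec( const Mat2D* matPtr, Vec2D p )
{
    Vec2D r = { matPtr->xx * p.x + matPtr->xy * p.y,
                matPtr->yx * p.x + matPtr->yy * p.y };
    return r;
}

/* rotation about an arbitrary center: choose the translation as v = c - M*c */
static Alt2D createRotationAbout( float angle, Vec2D center )
{
    Alt2D altL;
    Vec2D mappedCenter;
    altL.mat.xx =  cosf( angle );  altL.mat.xy = -sinf( angle );
    altL.mat.yx =  sinf( angle );  altL.mat.yy =  cosf( angle );
    mappedCenter = mapVec( &altL.mat, center );
    altL.vec.x = center.x - mappedCenter.x;
    altL.vec.y = center.y - mappedCenter.y;
    return altL;
}

Choosing v = c - M*c makes c a fixed point of the transform, so the rotation or scale pivots around the requested center.
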
Flt16Alt3D.c
     45  bts_Flt16Vec3D_init( &ptrA->vecE );  in bts_Flt16Alt3D_init()
     53  bts_Flt16Vec3D_exit( &ptrA->vecE );  in bts_Flt16Alt3D_exit()
    144  altL.vecE = bts_Flt16Vec3D_sub( *centerPtrA, bts_Flt16Mat3D_mapFlt( &altL.matE, centerPtrA ) );  in bts_Flt16Alt3D_createScale()
    155  altL.vecE = bts_Flt16Vec3D_sub( *centerPtrA, bts_Flt16Mat3D_mapFlt( &altL.matE, centerPtrA ) );  in bts_Flt16Alt3D_createLinear()
    174  altL.vecE = bts_Flt16Vec3D_create16( xA, yA, zA, vecBbpA );  in bts_Flt16Alt3D_create16()
    193  altL.vecE = bts_Flt16Vec3D_create32( xA, yA, zA, vecBbpA );  in bts_Flt16Alt3D_create32()
    203  int32 shiftL = altPtrA->vecE.bbpE - vecL.bbpE;  in bts_Flt16Alt3D_mapFlt()
    207  vecL.xE += ( ( altPtrA->vecE.xE >> sh1L ) + 1 ) >> 1;  in bts_Flt16Alt3D_mapFlt()
    208  vecL.yE += ( ( altPtrA->vecE.yE >> sh1L ) + 1 ) >> 1;  in bts_Flt16Alt3D_mapFlt()
    209  vecL.zE += ( ( altPtrA->vecE.zE >> sh1L ) + 1 ) >> 1;  in bts_Flt16Alt3D_mapFlt()
    [all …]

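The mapFlt lines above bring the translation down to the output vector's precision with a rounded shift rather than a plain truncation. A small sketch of that idiom, assuming bbpE counts fractional bits (an integer xE with bbpE = b then represents xE / 2^b); the helper name is invented here.

#include <stdint.h>

/* round-to-nearest right shift; assumes shift >= 1, mirroring
 * ( ( xE >> sh1L ) + 1 ) >> 1 with sh1L = shift - 1 */
static int32_t roundedShiftRight( int32_t x, int32_t shift )
{
    return ( ( x >> ( shift - 1 ) ) + 1 ) >> 1;
}

For example, 43 at 4 fractional bits is 2.6875; roundedShiftRight( 43, 2 ) gives 11, i.e. 2.75 at 2 fractional bits, whereas a plain 43 >> 2 would truncate to 10 (2.5).
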
CompactAlt.c
     47  bbs_Int16Arr_init( cpA, &ptrA->vecE );  in bts_CompactAlt_init()
     57  bbs_Int16Arr_exit( cpA, &ptrA->vecE );  in bts_CompactAlt_exit()
     95  bbs_Int16Arr_create( cpA, &ptrA->vecE, heightA, mspA );  in bts_CompactAlt_create()
     96  bbs_Int16Arr_fill( cpA, &ptrA->vecE, 0 );  in bts_CompactAlt_create()
    107  bbs_Int16Arr_copy( cpA, &ptrA->vecE, &srcPtrA->vecE );  in bts_CompactAlt_copy()
    127  + bbs_Int16Arr_memSize( cpA, &ptrA->vecE )  in bts_CompactAlt_memSize()
    141  memPtrA += bbs_Int16Arr_memWrite( cpA, &ptrA->vecE, memPtrA );  in bts_CompactAlt_memWrite()
    158  memPtrA += bbs_Int16Arr_memRead( cpA, &ptrA->vecE, memPtrA, mspA );  in bts_CompactAlt_memRead()
    196  if( ptrA->vecE.sizeE > 0 )  in bts_CompactAlt_map()
    198  const int16* vecL = ptrA->vecE.arrPtrE;  in bts_CompactAlt_map()

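The memSize/memWrite/memRead lines follow a convention in which each routine reports how much memory it consumed, so a composite object serializes its members by advancing the destination pointer with +=. A rough sketch of that pattern under invented types (ShortArr, Compact, and the 16-bit-word accounting are assumptions, not the bbs_/bts_ API):

#include <stdint.h>
#include <string.h>

typedef struct { int16_t* arrPtr; uint32_t size; } ShortArr;   /* stand-in for bbs_Int16Arr */

/* writes the size followed by the elements; returns 16-bit words written */
static uint32_t shortArr_memWrite( const ShortArr* arrPtr, uint16_t* memPtr )
{
    memPtr[ 0 ] = ( uint16_t )arrPtr->size;
    memcpy( memPtr + 1, arrPtr->arrPtr, arrPtr->size * sizeof( int16_t ) );
    return 1 + arrPtr->size;
}

typedef struct { ShortArr vec; ShortArr mat; } Compact;        /* stand-in for bts_CompactAlt */

static uint32_t compact_memWrite( const Compact* ptr, uint16_t* memPtr )
{
    uint16_t* p = memPtr;
    p += shortArr_memWrite( &ptr->vec, p );   /* advance by whatever each member wrote */
    p += shortArr_memWrite( &ptr->mat, p );
    return ( uint32_t )( p - memPtr );
}

memRead would mirror this with the same return-and-advance discipline, which is what the memPtrA += lines above show.
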
MapSequence.c
     45  bts_Flt16Vec_init( cpA, &ptrA->vecE );  in bts_MapSequence_init()
     62  bts_Flt16Vec_exit( cpA, &ptrA->vecE );  in bts_MapSequence_exit()
    175  bts_Flt16Vec_create( cpA, &ptrA->vecE, ptrA->vecSizeE, sspL );  in bts_MapSequence_memRead()
    246  struct bts_Flt16Vec* vp1L = &ptrL->vecE;  in bts_MapSequence_map()

Cluster3D.c
    328  int32 x0L = altA.vecE.xE;  in bts_Cluster3D_transform()
    329  int32 y0L = altA.vecE.yE;  in bts_Cluster3D_transform()
    330  int32 z0L = altA.vecE.zE;  in bts_Cluster3D_transform()
    332  int32 shiftL = altA.matE.bbpE + ptrA->bbpE - altA.vecE.bbpE;  in bts_Cluster3D_transform()

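The shift on line 332 adds two precisions and subtracts a third, which is consistent with fixed-point multiplication: a matrix entry with matE.bbpE fractional bits times a cluster coordinate with ptrA->bbpE fractional bits yields a product carrying the sum of the two, so the translation stored at vecE.bbpE has to be realigned by that difference before it can be added. A plain-integer sketch of one way to combine them (assumes the shift is non-negative; not the Cluster3D code itself):

#include <stdint.h>

static int32_t fxMulAdd( int32_t m, int32_t mBbp,    /* matrix entry      */
                         int32_t x, int32_t xBbp,    /* point coordinate  */
                         int32_t t, int32_t tBbp )   /* translation       */
{
    int64_t prodL  = ( int64_t )m * x;               /* bbp = mBbp + xBbp          */
    int32_t shiftL = mBbp + xBbp - tBbp;             /* same form as shiftL above  */
    /* promote the translation to the product's precision, then add;
     * the result keeps mBbp + xBbp fractional bits */
    return ( int32_t )( prodL + ( ( int64_t )t << shiftL ) );
}
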
MapSequence.h
     51  struct bts_Flt16Vec vecE;  member

CompactAlt.h
     51  struct bbs_Int16Arr vecE;  member

Flt16Alt3D.h
     47  struct bts_Flt16Vec3D vecE;  member

Flt16Alt2D.h
     47  struct bts_Flt16Vec2D vecE;  member

Cluster2D.c
    561  altL.vecE = bts_Flt16Vec2D_sub( cqL, cpL );  in bts_Cluster2D_alt()
    616  altL.vecE = bts_Flt16Vec2D_sub( cqL, cpMappedL );  in bts_Cluster2D_alt()
    821  altL.vecE = bts_Flt16Vec2D_sub( cqL, cpMappedL );  in bts_Cluster2D_alt()
    978  altL.vecE = bts_Flt16Vec2D_sub( cqL, cpMappedL );  in bts_Cluster2D_alt()

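These Cluster2D_alt lines set the translation to the difference between a target centroid (cqL) and a source centroid, possibly after mapping it through the estimated matrix (cpMappedL). That is consistent with the usual closing step of a point-set fit: with the matrix M fixed, the translation minimizing the summed squared error between M*p_i + t and q_i is t = mean(q) - M*mean(p). A plain-float sketch with hypothetical names, not the bts_ API:

#include <stddef.h>

typedef struct { float x, y; } Pt2D;
typedef struct { float xx, xy, yx, yy; } M2D;

/* t = mean(q) - M * mean(p); assumes n > 0 */
static Pt2D fitTranslation( const M2D* m, const Pt2D* p, const Pt2D* q, size_t n )
{
    Pt2D cp = { 0.0f, 0.0f };
    Pt2D cq = { 0.0f, 0.0f };
    Pt2D t;
    size_t i;
    for( i = 0; i < n; i++ )
    {
        cp.x += p[ i ].x;  cp.y += p[ i ].y;
        cq.x += q[ i ].x;  cq.y += q[ i ].y;
    }
    cp.x /= n;  cp.y /= n;   /* centroid of the source points  */
    cq.x /= n;  cq.y /= n;   /* centroid of the target points  */
    t.x = cq.x - ( m->xx * cp.x + m->xy * cp.y );
    t.y = cq.y - ( m->yx * cp.x + m->yy * cp.y );
    return t;
}
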
/external/neven/Embedded/common/src/b_ImageEm/
UInt16ByteImage.c
    419  shiftL = invAlt2DL.vecE.bbpE - bbpL;  in bim_UInt16ByteImage_warp()
    422  txL = ( int32 )invAlt2DL.vecE.xE >> shiftL;  in bim_UInt16ByteImage_warp()
    423  tyL = ( int32 )invAlt2DL.vecE.yE >> shiftL;  in bim_UInt16ByteImage_warp()
    429  if( invAlt2DL.vecE.xE > maxInt32Value8bbpL ||  in bim_UInt16ByteImage_warp()
    430  invAlt2DL.vecE.yE > maxInt32Value8bbpL )  in bim_UInt16ByteImage_warp()
    435  invAlt2DL.vecE.xE >> invAlt2DL.vecE.bbpE,  in bim_UInt16ByteImage_warp()
    436  invAlt2DL.vecE.yE >> invAlt2DL.vecE.bbpE,  in bim_UInt16ByteImage_warp()
    440  txL = ( int32 )invAlt2DL.vecE.xE << -shiftL;  in bim_UInt16ByteImage_warp()
    441  tyL = ( int32 )invAlt2DL.vecE.yE << -shiftL;  in bim_UInt16ByteImage_warp()
    444  invAlt2DL.vecE.bbpE = bbpL;  in bim_UInt16ByteImage_warp()
    [all …]

UInt8Image.c
    606  shiftL = invAlt2DL.vecE.bbpE - bbpL;  in bim_UInt8Image_warpOffs()
    609  txL = invAlt2DL.vecE.xE >> shiftL;  in bim_UInt8Image_warpOffs()
    610  tyL = invAlt2DL.vecE.yE >> shiftL;  in bim_UInt8Image_warpOffs()
    616  if( invAlt2DL.vecE.xE > maxInt32Value8bbpL ||  in bim_UInt8Image_warpOffs()
    617  invAlt2DL.vecE.yE > maxInt32Value8bbpL )  in bim_UInt8Image_warpOffs()
    622  invAlt2DL.vecE.xE >> invAlt2DL.vecE.bbpE,  in bim_UInt8Image_warpOffs()
    623  invAlt2DL.vecE.yE >> invAlt2DL.vecE.bbpE,  in bim_UInt8Image_warpOffs()
    627  txL = invAlt2DL.vecE.xE << -shiftL;  in bim_UInt8Image_warpOffs()
    628  tyL = invAlt2DL.vecE.yE << -shiftL;  in bim_UInt8Image_warpOffs()

Functions.c
    129  if( invAltL.vecE.bbpE <= 16 )  in bim_filterWarpInterpolation()
    131  uint32 shlL = 16 - invAltL.vecE.bbpE;  in bim_filterWarpInterpolation()
    132  txL = invAltL.vecE.xE << shlL;  in bim_filterWarpInterpolation()
    133  tyL = invAltL.vecE.yE << shlL;  in bim_filterWarpInterpolation()
    137  uint32 shrL = invAltL.vecE.bbpE - 16;  in bim_filterWarpInterpolation()
    138  txL = ( ( invAltL.vecE.xE >> ( shrL - 1 ) ) + 1 ) >> 1;  in bim_filterWarpInterpolation()
    139  tyL = ( ( invAltL.vecE.yE >> ( shrL - 1 ) ) + 1 ) >> 1;  in bim_filterWarpInterpolation()
    461  if( invAltL.vecE.bbpE <= 16 )  in bim_filterWarpPixelReplication()
    463  uint32 shlL = 16 - invAltL.vecE.bbpE;  in bim_filterWarpPixelReplication()
    464  txL = invAltL.vecE.xE << shlL;  in bim_filterWarpPixelReplication()
    [all …]

/external/neven/Embedded/common/src/b_BitFeatureEm/
LocalScanDetector.c
    541  altL.vecE.xE -= workAreaL.x1E << altL.vecE.bbpE;  in bbf_LocalScanDetector_process()
    542  altL.vecE.yE -= workAreaL.y1E << altL.vecE.bbpE;  in bbf_LocalScanDetector_process()

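Lines 541-542 subtract an integer pixel offset (the work-area origin) from a fixed-point translation, which requires promoting the integer to the vector's precision first. A one-line sketch of that, with an invented helper name:

#include <stdint.h>

/* bring the integer pixel offset to the same bbp before subtracting */
static int32_t subtractPixelOffset( int32_t fixedCoord, int32_t bbp, int32_t offsetPix )
{
    return fixedCoord - ( offsetPix << bbp );
}
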
/external/neven/Embedded/common/src/b_APIEm/
BFFaceFinder.c
    274  altL.vecE = bts_Flt16Vec2D_create32( xL, yL, 16 );  in bpi_BFFaceFinder_process()
    349  altL.vecE = bts_Flt16Vec2D_create32( xL, yL, 16 );  in bpi_BFFaceFinder_getFace()

/external/valgrind/VEX/priv/
guest_amd64_toIR.c
    16599  static IRTemp math_PBLENDVB_128 ( IRTemp vecE, IRTemp vecG,  in math_PBLENDVB_128() argument
    16617  binop(Iop_AndV128, mkexpr(vecE), mkexpr(mask)),  in math_PBLENDVB_128()
    16622  static IRTemp math_PBLENDVB_256 ( IRTemp vecE, IRTemp vecG,  in math_PBLENDVB_256() argument
    16646  binop(Iop_AndV256, mkexpr(vecE), mkexpr(mask)),  in math_PBLENDVB_256()
    16661  IRTemp vecE = newTemp(Ity_V128);  in dis_VBLENDV_128() local
    16667  assign(vecE, getXMMReg(rE));  in dis_VBLENDV_128()
    16676  assign(vecE, loadLE(Ity_V128, mkexpr(addr)));  in dis_VBLENDV_128()
    16685  IRTemp res = math_PBLENDVB_128( vecE, vecV, vecIS4, gran, opSAR );  in dis_VBLENDV_128()
    16700  IRTemp vecE = newTemp(Ity_V256);  in dis_VBLENDV_256() local
    16706  assign(vecE, getYMMReg(rE));  in dis_VBLENDV_256()
    [all …]

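In the VEX lines, vecE is the register-or-memory source of a BLENDV-style instruction, and math_PBLENDVB_* combines it with the other source through a per-lane mask built by an arithmetic shift (opSAR) and the Iop_And operations visible above. A byte-granularity C sketch of that select-by-sign-bit idea (not VEX IR; the real helper also supports wider lanes via its gran parameter):

#include <stdint.h>

/* for each byte, the control byte's sign bit picks e[i], otherwise g[i] */
static void pblendvb_bytes( uint8_t dst[ 16 ], const uint8_t e[ 16 ],
                            const uint8_t g[ 16 ], const uint8_t ctl[ 16 ] )
{
    int i;
    for( i = 0; i < 16; i++ )
    {
        /* an arithmetic shift of the sign bit would yield an all-ones or
         * all-zeros lane mask; expressed here as a simple conditional */
        uint8_t mask = ( ctl[ i ] & 0x80 ) ? 0xFF : 0x00;
        dst[ i ] = ( uint8_t )( ( e[ i ] & mask ) | ( g[ i ] & ~mask ) );
    }
}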