/external/flatbuffers/tests/ |
D | monster_test_generated.lobster |
    62  def InParentNamespaceStart(b_:flatbuffers_builder):
    63      b_.StartObject(0)
    64  def InParentNamespaceEnd(b_:flatbuffers_builder):
    65      b_.EndObject()
    73  def MonsterStart(b_:flatbuffers_builder):
    74      b_.StartObject(0)
    75  def MonsterEnd(b_:flatbuffers_builder):
    76      b_.EndObject()
    86  def CreateTest(b_:flatbuffers_builder, a:int, b:int):
    87      b_.Prep(2, 4)
    [all …]
|
/external/flatbuffers/samples/ |
D | monster_generated.lobster |
    30  def CreateVec3(b_:flatbuffers_builder, x:float, y:float, z:float):
    31      b_.Prep(4, 12)
    32      b_.PrependFloat32(z)
    33      b_.PrependFloat32(y)
    34      b_.PrependFloat32(x)
    35      return b_.Offset()
    64  def MonsterStart(b_:flatbuffers_builder):
    65      b_.StartObject(10)
    66  def MonsterAddPos(b_:flatbuffers_builder, pos:int):
    67      b_.PrependStructSlot(0, pos, 0)
    [all …]
|
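For orientation: the generated Lobster code above follows the usual FlatBuffers struct-building pattern — align, write fields in reverse declaration order, then address the struct by the offset left on the builder. A minimal C++ sketch of that call sequence, using a hypothetical Builder class (not the real flatbuffers API, which writes back-to-front; this mock appends forward purely to show the bookkeeping):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    class Builder {  // hypothetical stand-in for flatbuffers_builder
     public:
      // Pad so that the next `bytes` bytes of data end up `align`-aligned.
      void Prep(size_t align, size_t bytes) {
        while ((buf_.size() + bytes) % align != 0) buf_.push_back(0);
      }
      void PrependFloat32(float v) {  // simplified: appends rather than prepends
        uint8_t raw[4];
        std::memcpy(raw, &v, sizeof(raw));
        buf_.insert(buf_.end(), raw, raw + sizeof(raw));
      }
      size_t Offset() const { return buf_.size(); }
     private:
      std::vector<uint8_t> buf_;
    };

    // Mirrors CreateVec3 above: 12 bytes of float32, 4-byte aligned, fields
    // written in reverse order (z, y, x), then the struct's offset returned.
    size_t CreateVec3(Builder& b_, float x, float y, float z) {
      b_.Prep(4, 12);
      b_.PrependFloat32(z);
      b_.PrependFloat32(y);
      b_.PrependFloat32(x);
      return b_.Offset();
    }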
/external/flatbuffers/tests/namespace_test/ |
D | namespace_test2_generated.lobster |
    29  def TableInFirstNSStart(b_:flatbuffers_builder):
    30      b_.StartObject(3)
    31  def TableInFirstNSAddFooTable(b_:flatbuffers_builder, foo_table:int):
    32      b_.PrependUOffsetTRelativeSlot(0, foo_table, 0)
    33  def TableInFirstNSAddFooEnum(b_:flatbuffers_builder, foo_enum:int):
    34      b_.PrependInt8Slot(1, foo_enum, 0)
    35  def TableInFirstNSAddFooStruct(b_:flatbuffers_builder, foo_struct:int):
    36      b_.PrependStructSlot(2, foo_struct, 0)
    37  def TableInFirstNSEnd(b_:flatbuffers_builder):
    38      b_.EndObject()
    [all …]
|
D | namespace_test1_generated.lobster |
    22  def TableInNestedNSStart(b_:flatbuffers_builder):
    23      b_.StartObject(1)
    24  def TableInNestedNSAddFoo(b_:flatbuffers_builder, foo:int):
    25      b_.PrependInt32Slot(0, foo, 0)
    26  def TableInNestedNSEnd(b_:flatbuffers_builder):
    27      b_.EndObject()
    35  def CreateStructInNestedNS(b_:flatbuffers_builder, a:int, b:int):
    36      b_.Prep(4, 8)
    37      b_.PrependInt32(b)
    38      b_.PrependInt32(a)
    [all …]
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | ir_emitter.cc |
     93      b_(llvm_module->getContext()),  in IrEmitter()
    101      b_.setFastMathFlags(llvm_ir::GetCpuFastMathFlags(hlo_module_config_));  in IrEmitter()
    161      hlo_module_config_, module_, &b_,  in InitializeIrFunction()
    301      GetEmittedValueFor(operand), &b_);  in HandleGetTupleElement()
    321      GetEmittedValueFor(on_false), &b_);  in HandleTupleSelect()
    344      llvm_ir::EmitTuple(GetIrArrayFor(infeed), {data_address, token_address}, &b_);  in HandleInfeed()
    375      tuple_element_addresses, &b_);  in HandleInfeed()
    398      llvm_ir::EncodeSelfDescribingShapeConstant(shape, &shape_length, &b_));  in EmitXfeedTransfer()
    400      llvm::Type* int32_type = b_.getInt32Ty();  in EmitXfeedTransfer()
    426      b_.getVoidTy(),  in EmitXfeedTransfer()
    [all …]
|
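The b_ hits above all point at the same idiom: the emitter owns a single llvm::IRBuilder<> member, constructed from the module's context, and threads &b_ through every helper that emits instructions. A self-contained sketch of that structure (not the XLA class itself):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Module.h"

    class TinyEmitter {
     public:
      explicit TinyEmitter(llvm::Module* module)
          : module_(module), b_(module->getContext()) {
        llvm::FastMathFlags fmf;
        fmf.setFast();
        b_.setFastMathFlags(fmf);  // analogous to GetCpuFastMathFlags(...) above
      }

      // Every emission helper issues instructions through the shared builder.
      llvm::Value* EmitMulAdd(llvm::Value* a, llvm::Value* b, llvm::Value* c) {
        return b_.CreateAdd(b_.CreateMul(a, b), c, "muladd");
      }

     private:
      llvm::Module* module_;
      llvm::IRBuilder<> b_;  // plays the role of b_ in ir_emitter.cc
    };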
D | dot_op_emitter.cc |
    199      llvm::IRBuilder<>* b_;  member in xla::cpu::__anond13f39450111::DotOpEmitter
    221      b_(b),  in DotOpEmitter()
    242      b_->CreateMemSet(target, b_->getInt8(0), /*Size=*/size_bytes,  in EmitTiledLlvmIrGemm()
    247      *b_->GetInsertBlock()->getParent(), primitive_type);  in EmitTiledLlvmIrGemm()
    260      /*rhs=*/rhs, /*result=*/target, b_, hlo_module_config_);  in EmitTiledLlvmIrGemm()
    321      *b_->GetInsertBlock()->getParent(), primitive_type);  in EmitTiledLlvmIrGemv()
    340      /*result=*/result_op, b_, hlo_module_config_);  in EmitTiledLlvmIrGemv()
    350      /*result=*/result_op, b_, hlo_module_config_);  in EmitTiledLlvmIrGemv()
    431      llvm_ir::ForLoopNest loop_nest(llvm_ir::IrName(dot_hlo_name_), b_);  in EmitNaiveLlvmIrGemm()
    459      b_->getInt64Ty());  in EmitNaiveLlvmIrGemm()
    [all …]
|
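EmitNaiveLlvmIrGemm above builds a ForLoopNest for the fallback matmul path; the computation such a nest performs is the classic untiled triple loop. Sketched in plain C++ for reference (row-major layout assumed; this is the math, not the emitter's actual IR):

    #include <vector>

    void NaiveGemm(const std::vector<float>& lhs, const std::vector<float>& rhs,
                   std::vector<float>* result, int m, int k, int n) {
      for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
          float acc = 0.0f;
          for (int p = 0; p < k; ++p) {
            acc += lhs[i * k + p] * rhs[p * n + j];  // dot(row i of lhs, col j of rhs)
          }
          (*result)[i * n + j] = acc;
        }
      }
    }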
D | tiled_dot_emitter.cc |
     39      : vsl_(vsl), b_(b) {  in MemoryTile()
     88      pointers_[i], b_->CreateAdd(minor_dim_offset, b_->getInt64(j))));  in LoadBroadcastTile()
     96      llvm::IRBuilder<>* b_;  member in xla::cpu::__anon016ac5180111::MemoryTile
    241      b_(b),  in ColumnMajorMatrixVectorProductEmitter()
    242      ksl_(b_),  in ColumnMajorMatrixVectorProductEmitter()
    243      vsl_(config.scalar_type(), /*vector_size=*/config.tile_rows(), b_, "") {  in ColumnMajorMatrixVectorProductEmitter()
    257      return MemoryTile(&vsl_, b_, /*matrix=*/lhs_,  in GetLhsMemoryTile()
    287      llvm::IRBuilder<>* b_;  member in xla::cpu::__anon016ac5180111::ColumnMajorMatrixVectorProductEmitter
    316      EmitOuterLoopBody(b_->getInt64(column_limit), column_remainder,  in Emit()
    348      llvm::Value* columns_llvm = b_->getInt64(columns);  in EmitInnerLoopEpilogue()
    [all …]
|
/external/sfntly/cpp/src/sfntly/data/ |
D | memory_byte_array.cc |
    24      : ByteArray(0, length), b_(NULL), allocated_(true) {  in MemoryByteArray()
    28      : ByteArray(filled_length, filled_length), b_(b), allocated_(false) {  in MemoryByteArray()
    40      os->Write(b_, offset, length);  in CopyTo()
    45      if (allocated_ && b_ == NULL) {  in Init()
    46        b_ = new byte_t[Size()];  in Init()
    47        memset(b_, 0, Size());  in Init()
    53      b_[index] = b;  in InternalPut()
    62      memcpy(b_ + index, b + offset, length);  in InternalPut()
    68      return b_[index];  in InternalGet()
    77      memcpy(b + offset, b_ + index, length);  in InternalGet()
    [all …]
|
D | growable_memory_byte_array.cc |
    38      os->Write(&b_, offset, length);  in CopyTo()
    43      if ((size_t)index >= b_.size()) {  in InternalPut()
    44        b_.resize((size_t)(index + 1));  in InternalPut()
    46      b_[index] = b;  in InternalPut()
    53      if ((size_t)index + length >= b_.size()) {  in InternalPut()
    56        b_.resize((size_t)(index + length + 1));  in InternalPut()
    58      std::copy(b + offset, b + offset + length, b_.begin() + index);  in InternalPut()
    63      return b_[index];  in InternalGet()
    70      memcpy(b + offset, &(b_[0]) + index, length);  in InternalGet()
    75      b_.clear();  in Close()
    [all …]
|
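The two byte arrays differ only in their backing store: memory_byte_array.cc wraps a fixed, lazily zero-filled buffer, while the growable variant resizes a std::vector on any out-of-range InternalPut. A standalone sketch of the growable semantics (hypothetical class, mirroring the index + length + 1 growth visible above):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    class GrowableByteArray {
     public:
      void Put(size_t index, uint8_t b) {
        if (index >= b_.size()) b_.resize(index + 1);  // grow to fit the write
        b_[index] = b;
      }
      void Put(size_t index, const uint8_t* b, size_t offset, size_t length) {
        if (index + length >= b_.size()) b_.resize(index + length + 1);
        std::copy(b + offset, b + offset + length, b_.begin() + index);
      }
      uint8_t Get(size_t index) const { return b_[index]; }

     private:
      std::vector<uint8_t> b_;  // same role as b_ in growable_memory_byte_array.cc
    };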
/external/u-boot/arch/x86/include/asm/acpi/ |
D | irq_helper.h |
    37  #define RP_IRQ_ROUTES(prefix_, func_, a_, b_, c_, d_) \  argument
    41      ACPI_DEV_IRQ(0x0000, 1, b_), \
    54  #define RP_IRQ_ROUTES(prefix_, func_, a_, b_, c_, d_) \  argument
    58      ACPI_DEV_IRQ(0x0000, 1, b_), \
    75  #define PCI_DEV_PIRQ_ROUTE(dev_, a_, b_, c_, d_) \  argument
    77      ACPI_DEV_IRQ(dev_, 1, b_), \
    81  #define PCIE_BRIDGE_DEV(prefix_, dev_, a_, b_, c_, d_) \  argument
    82      ROOTPORT_IRQ_ROUTES(prefix_, a_, b_, c_, d_) \
    85  #define ROOTPORT_IRQ_ROUTES(prefix_, a_, b_, c_, d_) \  argument
    86      RP_IRQ_ROUTES(prefix_, 0, a_, b_, c_, d_) \
    [all …]
|
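In these macros b_ is the PIRQ bound to interrupt pin 1 (INTB); each *_ROUTE macro expands to one ACPI_DEV_IRQ() entry per pin A-D. A simplified illustration of that expansion, with stand-in definitions and made-up values (not u-boot's real ones):

    // Stand-in: the real ACPI_DEV_IRQ emits an ACPI IRQ routing package entry.
    struct irq_route { unsigned dev; unsigned pin; unsigned pirq; };
    #define ACPI_DEV_IRQ(dev_, pin_, pirq_) { dev_, pin_, pirq_ }

    #define PCI_DEV_PIRQ_ROUTE(dev_, a_, b_, c_, d_) \
            ACPI_DEV_IRQ(dev_, 0, a_),               \
            ACPI_DEV_IRQ(dev_, 1, b_),               \
            ACPI_DEV_IRQ(dev_, 2, c_),               \
            ACPI_DEV_IRQ(dev_, 3, d_)

    // Hypothetical device 0x1f routed to PIRQs 5..8:
    static const struct irq_route routes[] = {
        PCI_DEV_PIRQ_ROUTE(0x1f, 5, 6, 7, 8),
    };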
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | parallel_loop_emitter.cc |
     76      llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x, {}, {}, b_);  in EmitIndexAndSetExitBasicBlock()
     79      block_id = b_->CreateZExtOrTrunc(block_id, index_type, "block_id");  in EmitIndexAndSetExitBasicBlock()
     86      llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x, {}, {}, b_);  in EmitIndexAndSetExitBasicBlock()
     89      thread_id = b_->CreateZExtOrTrunc(thread_id, index_type, "thread_id");  in EmitIndexAndSetExitBasicBlock()
     91      llvm::Value* linear_index_base = b_->CreateAdd(  in EmitIndexAndSetExitBasicBlock()
     92      b_->CreateMul(block_id,  in EmitIndexAndSetExitBasicBlock()
    109      {b_->CreateICmpULT(  in EmitIndexAndSetExitBasicBlock()
    115      {}, b_);  in EmitIndexAndSetExitBasicBlock()
    118      linear_index_base = b_->CreateMul(  in EmitIndexAndSetExitBasicBlock()
    123      array_indices.emplace_back(linear_index_base, shape_, b_);  in EmitIndexAndSetExitBasicBlock()
    [all …]
|
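The IR built above (CreateMul + CreateAdd over the ctaid/tid intrinsics, guarded by CreateICmpULT) is the standard GPU flattening of grid coordinates into one loop index. The same arithmetic in plain C++:

    #include <cstdint>

    int64_t LinearIndexBase(int64_t block_id, int64_t thread_id,
                            int64_t threads_per_block) {
      // block_id * threads_per_block + thread_id, as emitted above
      return block_id * threads_per_block + thread_id;
    }

    bool InBounds(int64_t linear_index, int64_t num_elements) {
      // the CreateICmpULT guard: threads past the end of the array do nothing
      return static_cast<uint64_t>(linear_index) <
             static_cast<uint64_t>(num_elements);
    }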
D | ir_emitter.cc |
     61      b_(module_->getContext()),  in IrEmitter()
     63      &ir_emitter_context->buffer_assignment(), &b_, module_,  in IrEmitter()
     72      return GetIrArray(*operand, *hlo).EmitReadArrayElement(index, &b_);  in DefaultAction()
     76      *hlo, GpuElementalIrEmitter(hlo_module_config_, module_, &b_,  in DefaultAction()
    118      /*alignment=*/1, GetBasePointer(*operand), &b_));  in HandleGetTupleElement()
    147      llvm_ir::EmitTuple(GetIrArray(*tuple, *tuple), base_ptrs, &b_);  in HandleTuple()
    212      {output_address->getType()}, &b_);  in MaybeEmitDirectAtomicOperation()
    312      llvm::Type* atomic_type = b_.getIntNTy(atomic_size);  in EmitAtomicOperationUsingCAS()
    325      llvm::BasicBlock* loop_preheader_bb = b_.GetInsertBlock();  in EmitAtomicOperationUsingCAS()
    358      b_.GetInsertPoint(), "atomic_op_loop_exit");  in EmitAtomicOperationUsingCAS()
    [all …]
|
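EmitAtomicOperationUsingCAS above emits the standard fallback for operations with no native atomic instruction: loop on compare-and-swap until no other thread has raced the update. The same shape in portable C++ (a sketch of the pattern, not the emitted IR):

    #include <atomic>

    void AtomicApply(std::atomic<int>* address, int (*op)(int, int), int operand) {
      int old_value = address->load();
      int new_value;
      do {
        new_value = op(old_value, operand);  // body of atomic_op_loop
        // On failure, compare_exchange_weak reloads old_value with the
        // current contents, which is the loop back-edge in the IR above.
      } while (!address->compare_exchange_weak(old_value, new_value));
    }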
D | elemental_ir_emitter.cc |
     96      FPCast(converted_operands[i], b_->getFloatTy());  in EmitLibdeviceMathCall()
    115      result = FPCast(result, b_->getHalfTy());  in EmitLibdeviceMathCall()
    172      {lhs_value, rhs_value}, {lhs_value->getType()}, b_);  in EmitFloatBinaryOp()
    268      llvm::Type* type = prim_type == F16 ? b_->getFloatTy() : value->getType();  in EmitTanh()
    270      llvm::Value* fast_tanh = llvm_ir::EmitFastTanh(b_, input);  in EmitTanh()
    300      b_->GetInsertBlock()  in EmitDeviceFunctionCall()
    315      llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x, {}, {}, b_),  in EmitThreadId()
    316      b_->getIntNTy(128), /*isSigned=*/true, "block.id");  in EmitThreadId()
    319      llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x, {}, {}, b_),  in EmitThreadId()
    320      b_->getIntNTy(128), /*isSigned=*/true, "thread.id");  in EmitThreadId()
    [all …]
|
D | ir_emitter_unnested.cc |
    201      std::vector<llvm::Type*>(args.size(), b_.getInt8PtrTy()),  in BuildKernelPrototype()
    248      llvm::ConstantAsMetadata::get(b_.getInt32(1))}));  in BuildKernelPrototype()
    256      b_.SetInsertPoint(llvm::ReturnInst::Create(context, entry_bb));  in BuildKernelPrototype()
    590      hlo_module_config_, ir_emitter_context_->llvm_module(), &b_,  in HandleFusion()
    611      hlo_module_config_, ir_emitter_context_->llvm_module(), &b_,  in HandleFusion()
    655      &b_, GetNestedComputer());  in HandleFusion()
    672      &elemental_emitter, launch_dimensions, &b_);  in HandleFusion()
    713      .EmitArrayElementAddress(index, &b_,  in EmitExtraOutputsForReduce()
    810      select_and_scatter, launch_dimensions.launch_bound(), &b_);  in HandleSelectAndScatter()
    841      "selected_value_address", &b_);  in HandleSelectAndScatter()
    [all …]
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | elemental_ir_emitter.cc |
    232      return b_->CreateZExt(  in EmitIntegerUnaryOp()
    245      F32, module_, b_),  in EmitIntegerUnaryOp()
    246      b_);  in EmitIntegerUnaryOp()
    249      module_, b_);  in EmitIntegerUnaryOp()
    300      auto is_zero_undef = b_->getFalse();  in EmitIntegerUnaryOp()
    303      {operand_value->getType()}, b_);  in EmitIntegerUnaryOp()
    322      return b_->CreateZExt(Not(Trunc(operand_value, b_->getInt1Ty())),  in EmitIntegerUnaryOp()
    359      operand_value = EmitBF16ToF32(operand_value, b_);  in EmitFloatUnaryOp()
    366      return EmitF32ToBF16(operand_value, b_);  in EmitFloatUnaryOp()
    369      return b_->CreateZExt(  in EmitFloatUnaryOp()
    [all …]
|
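The EmitBF16ToF32/EmitF32ToBF16 calls rely on bfloat16 being the high 16 bits of an IEEE float32, so each conversion is a shift plus a bitcast. A minimal sketch under that assumption (truncating, no rounding; XLA's actual emission may round):

    #include <cstdint>
    #include <cstring>

    float Bf16ToF32(uint16_t bf16_bits) {
      uint32_t f32_bits = static_cast<uint32_t>(bf16_bits) << 16;
      float f;
      std::memcpy(&f, &f32_bits, sizeof(f));
      return f;
    }

    uint16_t F32ToBf16(float f) {
      uint32_t f32_bits;
      std::memcpy(&f32_bits, &f, sizeof(f));
      return static_cast<uint16_t>(f32_bits >> 16);  // drop low mantissa bits
    }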
/external/protobuf/src/google/protobuf/stubs/ |
D | common_unittest.cc |
    175      void SetABMethod(int a, const char* b) { a_ = a; b_ = b; }  in SetABMethod()
    178      current_instance_->b_ = b;  in SetABFunction()
    184      b_ = NULL;  in SetUp()
    194      const char* b_;  member in google::protobuf::__anonbe4607990111::ClosureTest
    252      EXPECT_NE(cstr, b_);  in TEST_F()
    255      EXPECT_EQ(cstr, b_);  in TEST_F()
    263      EXPECT_NE(cstr, b_);  in TEST_F()
    266      EXPECT_EQ(cstr, b_);  in TEST_F()
    321      EXPECT_NE(cstr, b_);  in TEST_F()
    324      EXPECT_EQ(cstr, b_);  in TEST_F()
    [all …]
|
/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/ |
D | kernel_support_library.cc |
    25      return IfWithStatus(b_->CreateICmpSLT(start, end), [&]() -> Status {  in ForWithStatus()
    28      name, b_->CreateAdd(start, step), end, step,  in ForWithStatus()
    42      return for_body_generator(indvar, b_->getInt1(is_first_iteration));  in ForWithStatus()
    46      name, start, end, step, b_,  in ForWithStatus()
    49      b_->SetInsertPoint(&loop->GetBodyBasicBlock()->back());  in ForWithStatus()
    52      /*is_first_iteration=*/b_->CreateICmpEQ(  in ForWithStatus()
    54      llvm_ir::SetToLastInsertPoint(loop->GetExitBasicBlock(), b_);  in ForWithStatus()
    63      llvm_ir::LlvmIfData if_data = llvm_ir::EmitIfThenElse(condition, name, b_);  in IfWithStatus()
    64      b_->SetInsertPoint(&if_data.true_block->back());  in IfWithStatus()
    66      b_->SetInsertPoint(&if_data.false_block->back());  in IfWithStatus()
    [all …]
|
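The is_first_iteration plumbing above (getInt1 for the peeled case, CreateICmpEQ against start otherwise) lets a loop body emit different code for iteration zero, e.g. initialize an accumulator instead of adding to it. Restated as ordinary C++ with hypothetical signatures:

    #include <cstdint>
    #include <functional>

    void For(int64_t start, int64_t end, int64_t step,
             const std::function<void(int64_t indvar, bool is_first)>& body) {
      for (int64_t i = start; i < end; i += step) {
        body(i, /*is_first=*/i == start);  // the CreateICmpEQ(indvar, start) above
      }
    }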
D | kernel_support_library.h |
     40      : b_(b),  in b_()  function
     75      return ForWithStatus(name, /*start=*/b_->getInt64(start),  in ForWithStatus()
     76      /*end=*/b_->getInt64(end),  in ForWithStatus()
     77      /*step=*/b_->getInt64(step), for_body_generator);  in ForWithStatus()
     84      For(name, /*start=*/b_->getInt64(start),  in For()
     85      /*end=*/b_->getInt64(end),  in For()
     86      /*step=*/b_->getInt64(step), for_body_generator);  in For()
    187      return ForWithStatus(name, /*start=*/b_->getInt64(start),  in ForWithStatus()
    188      /*end=*/b_->getInt64(end),  in ForWithStatus()
    189      /*step=*/b_->getInt64(step), for_body_generator);  in ForWithStatus()
    [all …]
|
D | kernel_tiling.cc |
    122      : b_(b),  in KernelMappingScheme()
    156      unnormalized_shape, b_);  in GetUnnormalizedIndex()
    161      llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x, {}, {}, b_);  in EmitBlockIndex()
    165      b_->CreateIntCast(block_id, index_ty, /*isSigned=*/true, "block.id.x");  in EmitBlockIndex()
    169      b_);  in EmitBlockIndex()
    178      multidim.push_back(b_->CreateMul(  in GetTileIndexForBlockOrigin()
    191      b_->CreateMul(tile_index[i],  in GetElementIndexForTileOrigin()
    213      return llvm_ir::AllocateSharedMemoryTile(b_->GetInsertBlock()->getModule(),  in GetSharedMemoryBufferForElementType()
    222      llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x, {}, {}, b_);  in EmitThreadYXCoordinate()
    225      b_->CreateIntCast(thread_id_raw, index_ty,  in EmitThreadYXCoordinate()
    [all …]
|
D | fused_ir_emitter.cc |
     63      generated_value_bb == b_->GetInsertBlock()) {  in DefaultAction()
     71      << b_->GetInsertBlock()->getName().str() << ").";  in DefaultAction()
     88      *b_->GetInsertBlock()->getModule(), initializer->getType(),  in HandleConstant()
     98      .EmitReadArrayElement(index, b_);  in HandleConstant()
    126      /*alignment=*/1, tuple_ptr, b_);  in HandleGetTupleElement()
    136      .EmitReadArrayElement(index, b_);  in HandleGetTupleElement()
    156      return b_->CreateLoad(  in HandleParameter()
    157      b_->CreateGEP(param_tile_buffer, {index.GetConstantWithIndexType(0),  in HandleParameter()
    164      .EmitReadArrayElement(index, b_);  in HandleParameter()
    179      llvm::StructType::get(b_->getContext(), operand_elemental_ir_types));  in HandleTuple()
    [all …]
|
/external/boringssl/src/crypto/chacha/asm/ |
D | chacha-x86.pl |
     54  ($b,$b_)=("ebx","ebp");
     92  &mov (&DWP(4*$bp,"esp"),$b_) if ($i!=0);
     98  &mov ($b_,&DWP(4*$bn,"esp")) if ($i<7);
     99  &mov ($b_,&DWP(128,"esp")) if ($i==7); # loop counter
    109  &add ($a,$b_) if ($i<7); # elsewhere
    112  ($b,$b_)=($b_,$b);
    180  &mov ($b_,&DWP(64+4*6,"esp"));
    186  &mov (&DWP(4*6,"esp"),$b_);
    195  &mov ($b_,&DWP(64+4*4,"esp"));
    207  &add ($a,$b_); # elsewhere
    [all …]
|
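The perl above is register allocation for ChaCha: $b and $b_ are swapped each quarter-round so the next column's b operand is already in a register while the other is spilled to the stack. For reference, the quarter-round being scheduled, in plain C++:

    #include <cstdint>

    static inline uint32_t Rotl(uint32_t v, int n) {
      return (v << n) | (v >> (32 - n));
    }

    void QuarterRound(uint32_t& a, uint32_t& b, uint32_t& c, uint32_t& d) {
      a += b; d ^= a; d = Rotl(d, 16);
      c += d; b ^= c; b = Rotl(b, 12);
      a += b; d ^= a; d = Rotl(d, 8);
      c += d; b ^= c; b = Rotl(b, 7);
    }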
/external/freetype/src/base/ |
D | ftcalc.c |
    175      FT_Long b_,  in FT_MulDiv()  argument
    184      b = (FT_UInt64)b_;  in FT_MulDiv()
    188      FT_MOVE_SIGN( b_, b, s );  in FT_MulDiv()
    204      FT_Long b_,  in FT_MulDiv_No_Round()  argument
    213      b = (FT_UInt64)b_;  in FT_MulDiv_No_Round()
    217      FT_MOVE_SIGN( b_, b, s );  in FT_MulDiv_No_Round()
    233      FT_Long b_ )  in FT_MulFix()  argument
    237      return FT_MULFIX_ASSEMBLER( (FT_Int32)a_, (FT_Int32)b_ );  in FT_MulFix()
    241      FT_Int64 ab = (FT_Int64)a_ * (FT_Int64)b_;  in FT_MulFix()
    254      FT_Long b_ )  in FT_DivFix()  argument
    [all …]
|
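FT_MulDiv computes a*b/c in 64-bit arithmetic, moving the signs out first (FT_MOVE_SIGN) and rounding the quotient; FT_MulDiv_No_Round skips the rounding. A minimal sketch of that contract, without FreeType's overflow hardening or its c == 0 handling:

    #include <cstdint>
    #include <cstdlib>

    long MulDiv(long a, long b, long c) {  // assumes c != 0
      int sign = ((a < 0) != (b < 0)) != (c < 0) ? -1 : 1;
      uint64_t ua = std::llabs(a), ub = std::llabs(b), uc = std::llabs(c);
      uint64_t r = (ua * ub + uc / 2) / uc;  // round to nearest
      return sign * static_cast<long>(r);
    }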
/external/bzip2/ |
D | bzip2.1.preformatted |
    19  _b_z_i_p_2 compresses files using the Burrows‐Wheeler block
    29  _b_z_i_p_2 expects a list of file names to accompany the com
    41  _b_z_i_p_2 and _b_u_n_z_i_p_2 will by default not overwrite existing
    44  If no file names are specified, _b_z_i_p_2 compresses from
    45  standard input to standard output. In this case, _b_z_i_p_2
    50  _b_u_n_z_i_p_2 (or _b_z_i_p_2 _−_d_) decompresses all specified files.
    51  Files which were not created by _b_z_i_p_2 will be detected and
    52  ignored, and a warning issued. _b_z_i_p_2 attempts to guess
    63  …_._b_z_2_, _._b_z_, _._t_b_z_2 or _._t_b_z_, _b_z_i_p_2 complains that i…
    70  _b_u_n_z_i_p_2 will correctly decompress a file which is the con
    [all …]
|
/external/tensorflow/tensorflow/core/lib/gtl/ |
D | flatset.h |
    111      const_iterator() : b_(nullptr), end_(nullptr), i_(0) {}  in const_iterator()
    114      const_iterator(Bucket* b, Bucket* end) : b_(b), end_(end), i_(0) {  in const_iterator()
    120      : b_(b), end_(end), i_(i) {}  in const_iterator()
    125      return b_ == x.b_ && i_ == x.i_;
    129      DCHECK(b_ != end_);
    142      Bucket* b_;  variable
    146      reference key() const { return b_->key(i_); }  in key()
    148      while (b_ < end_) {  in SkipUnused()
    151      b_++;  in SkipUnused()
    152      } else if (b_->marker[i_] < 2) {  in SkipUnused()
    [all …]
|
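SkipUnused above is the heart of the flat set's iterator: advance the (b_, i_) pair past unoccupied slots, stepping to the next bucket when i_ runs off the end. A simplified reconstruction with hypothetical types (the snippet suggests marker[i] < 2 means the slot is empty or deleted):

    struct Bucket {
      static constexpr int kWidth = 4;    // slots per bucket (made-up width)
      unsigned char marker[kWidth];       // < 2 => unused, >= 2 => occupied
      int key[kWidth];
    };

    struct ConstIterator {
      Bucket* b_;
      Bucket* end_;
      int i_ = 0;

      void SkipUnused() {
        while (b_ < end_) {
          if (i_ >= Bucket::kWidth) {      // ran off this bucket
            ++b_;
            i_ = 0;
          } else if (b_->marker[i_] < 2) { // unused slot: keep scanning
            ++i_;
          } else {
            break;                         // landed on an occupied slot
          }
        }
      }
    };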
/external/clang/test/Analysis/ |
D | temp-obj-dtors-cfg-output.cpp |
    57      C():b_(true) {}  in C()
    60      operator bool() { return b_; }  in operator bool()
    61      bool b_;  member
    65      D():b_(true) {}  in D()
    67      operator bool() { return b_; }  in operator bool()
    68      bool b_;  member
|