/external/tensorflow/tensorflow/compiler/xla/service/ |
D | hlo_casting_utils.h |
      74   const T* DynCast(const HloInstruction* instruction) {  in DynCast() function
      81   T* DynCast(HloInstruction* instruction) {  in DynCast() function
      83   DynCast<T>(const_cast<const HloInstruction*>(instruction)));  in DynCast()
      92   return instruction != nullptr ? DynCast<T>(instruction) : nullptr;  in DynCastOrNull()
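The hlo_casting_utils.h entries above define the DynCast<T> helpers that the other XLA references in this listing use: a checked downcast from HloInstruction to a concrete subclass that yields nullptr on a type mismatch, a non-const overload that delegates to the const one (line 83), and a DynCastOrNull variant that also tolerates a null input (line 92). Below is a minimal, self-contained sketch of that pattern; the toy Instruction hierarchy and the ClassOf-based type check are illustrative assumptions, not the real XLA classes.

// Minimal sketch of the DynCast pattern from the listing above.
// Toy classes only; not the actual HloInstruction hierarchy.
#include <cassert>
#include <iostream>

enum class Opcode { kAdd, kSort };

class Instruction {
 public:
  explicit Instruction(Opcode opcode) : opcode_(opcode) {}
  virtual ~Instruction() = default;
  Opcode opcode() const { return opcode_; }

 private:
  Opcode opcode_;
};

class SortInstruction : public Instruction {
 public:
  SortInstruction() : Instruction(Opcode::kSort) {}
  // Dispatch hook used by DynCast below (assumed analogue of a ClassOf check).
  static bool ClassOf(const Instruction* inst) {
    return inst->opcode() == Opcode::kSort;
  }
};

// const overload: returns nullptr when the runtime type does not match.
template <typename T>
const T* DynCast(const Instruction* instruction) {
  assert(instruction != nullptr);
  return T::ClassOf(instruction) ? static_cast<const T*>(instruction) : nullptr;
}

// Non-const overload delegates to the const one, mirroring line 83 above.
template <typename T>
T* DynCast(Instruction* instruction) {
  return const_cast<T*>(DynCast<T>(const_cast<const Instruction*>(instruction)));
}

// Null-tolerant variant, mirroring DynCastOrNull at line 92 above.
template <typename T>
T* DynCastOrNull(Instruction* instruction) {
  return instruction != nullptr ? DynCast<T>(instruction) : nullptr;
}

int main() {
  SortInstruction sort;
  Instruction add(Opcode::kAdd);

  std::cout << (DynCast<SortInstruction>(&sort) != nullptr) << "\n";          // 1: matching downcast
  std::cout << (DynCast<SortInstruction>(&add) != nullptr) << "\n";           // 0: mismatch yields nullptr
  std::cout << (DynCastOrNull<SortInstruction>(nullptr) == nullptr) << "\n";  // 1: null input tolerated
  return 0;
}

The call sites listed in the remaining files follow the same idiom: DynCast to the expected subclass, then branch on the result being non-null (often directly in an if-initializer, as in hlo_verifier.cc and hlo_instruction.cc above).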
|
D | hlo_query.cc |
      128  DynCast<HloAllReduceInstruction>(hlo)->constrain_layout()) {  in ContainsLayoutConstrainedAllReduce()
      141  DynCast<HloChannelInstruction>(hlo);  in NextChannelId()
      155  auto send = DynCast<HloSendInstruction>(hlo);  in HasX64TransformedHostTransfer()
      160  auto recv = DynCast<HloRecvInstruction>(hlo);  in HasX64TransformedHostTransfer()
|
D | hlo_casting_utils_test.cc |
      76   DynCast<DummyInstruction>(static_cast<HloInstruction*>(&instruction));  in TEST()
      83   DynCast<DummyInstruction>(static_cast<HloInstruction*>(&instruction));  in TEST()
      89   ASSERT_DEATH(DynCast<DummyInstruction>(null), "");  in TEST()
|
D | hlo_module_group_util.cc |
      99   !DynCast<HloRecvDoneInstruction>(instruction)->is_host_transfer()) {  in GlobalPredecessors()
      106  !DynCast<HloSendInstruction>(instruction)->is_host_transfer()) {  in GlobalPredecessors()
      177  !DynCast<HloRecvInstruction>(instruction)->is_host_transfer()) {  in GlobalSuccessors()
      186  !DynCast<HloSendInstruction>(instruction)->is_host_transfer()) {  in GlobalSuccessors()
|
D | topk_rewriter.cc |
      70   HloSortInstruction* sort = DynCast<HloSortInstruction>(inst);  in Run()
      76   DynCast<HloIotaInstruction>(sort->mutable_operand(1));  in Run()
|
D | while_loop_all_reduce_code_motion_test.cc |
      106  HloAllReduceInstruction* moved_all_reduce = DynCast<HloAllReduceInstruction>(  in TEST_F()
      114  EXPECT_EQ(DynCast<HloGetTupleElementInstruction>(  in TEST_F()
      400  HloAllReduceInstruction* moved_all_reduce = DynCast<HloAllReduceInstruction>(  in TEST_F()
|
D | memory_space_assignment_utils.cc | 94 DynCast<HloCustomCallInstruction>(position.instruction)) { in IsValueAllowedInAlternateMemory()
|
D | hlo_dce.cc | 50 auto maybe_collective_op = DynCast<HloAllReduceInstruction>(instruction); in RunOnComputation()
|
D | hlo_instruction.cc |
      1131  auto send_operand = DynCast<HloSendInstruction>(operand);  in CreateSendDone()
      1147  auto recv_operand = DynCast<HloRecvInstruction>(operand);  in CreateRecvDone()
      2049  if (auto channel_inst = DynCast<HloChannelInstruction>(this)) {  in IdenticalInternal()
      3478  if (auto hlo_param = DynCast<HloParameterInstruction>(&hlo)) {  in ComputeInternal()
      3853  if (auto* convolution = DynCast<HloConvolutionInstruction>(this)) {  in precision_config()
      3856  if (auto* dot = DynCast<HloDotInstruction>(this)) {  in precision_config()
      3860  if (auto* custom_call = DynCast<HloCustomCallInstruction>(this)) {  in precision_config()
      3867  if (auto* convolution = DynCast<HloConvolutionInstruction>(this)) {  in mutable_precision_config()
      3870  if (auto* dot = DynCast<HloDotInstruction>(this)) {  in mutable_precision_config()
      3915  if (auto set_size = DynCast<HloSetDimensionSizeInstruction>(this)) {  in dimension()
      [all …]
|
D | hlo_verifier.cc |
      232   if (auto channel_instr = DynCast<HloChannelInstruction>(hlo)) {  in CheckReplicaGroups()
      794   DynCast<const HloCustomCallInstruction>(instruction);  in HandleCustomCall()
      1431  DynCast<const HloSendRecvInstruction>(instr1);  in CheckSameIsHostTransfer()
      1433  DynCast<const HloSendRecvInstruction>(instr2);  in CheckSameIsHostTransfer()
      1519  auto all_reduce = DynCast<HloAllReduceInstruction>(instruction);  in VerifyLayoutConstrainedAllReduce()
      1543  auto channel_instr = DynCast<HloChannelInstruction>(instruction);  in VerifyChannels()
      1584  auto sendrecv = DynCast<HloSendRecvInstruction>(first);  in VerifyChannels()
      1589  auto cast = DynCast<HloSendRecvInstruction>(instr);  in VerifyChannels()
      1758  if (auto* comparison = DynCast<HloCompareInstruction>(instruction)) {  in CheckElementwiseInstruction()
|
D | all_to_all_decomposer.cc | 39 auto* all_to_all = DynCast<HloAllToAllInstruction>(instruction); in InstructionMatchesPattern()
|
D | while_loop_all_reduce_code_motion.cc |
      223  if (auto* gte = DynCast<HloGetTupleElementInstruction>(user)) {  in IsAllReduceMovable()
      560  DynCast<HloAllReduceInstruction>(while_body_instruction)) {  in Run()
|
D | layout_assignment.cc |
      395  DynCast<HloSendRecvInstruction>(instruction);  in IsHostSendRecv()
      405  DynCast<HloSendRecvInstruction>(instruction);  in BuildHostChannelConstraints()
      432  DynCast<HloCustomCallInstruction>(instruction);  in IsLayoutConstrainedCustomCall()
      438  DynCast<HloCollectiveInstruction>(instruction);  in IsLayoutConstrainedCollective()
      483  DynCast<HloCustomCallInstruction>(instruction);  in AddMandatoryConstraints()
      692  DynCast<HloCustomCallInstruction>(instruction);  in CheckCustomCallLayout()
|
D | conditional_code_motion.cc |
      648   DynCast<HloGetTupleElementInstruction>(to_move_in[0].operands()[0]);  in MoveInstructionIn()
      969   if (auto tuple_gte = DynCast<HloGetTupleElementInstruction>(user)) {  in ReusesBeforeBoundary()
      1064  DynCast<HloGetTupleElementInstruction>(hlo->operand(0));  in BenefitForMovingBoundaries()
|
D | hlo_module_group_metadata.cc | 206 DynCast<HloSendRecvInstruction>(instruction); in IsChannelInstruction()
|
D | ar_crs_combiner.cc | 55 if (auto ar = DynCast<HloAllReduceInstruction>(instruction)) { in ReplaceReplicatedAllReduce()
|
D | hlo_graph_dumper.cc |
      733  return DynCast<HloConstantInstruction>(operand);  in TryGetFusionParameterConstant()
      869  DynCast<HloConstantInstruction>(operand)) {  in GetInstructionNodeInlinedOperands()
|
D | hlo_sharding_util.cc |
      1285  if (auto* iota = DynCast<HloIotaInstruction>(op)) {  in GetGatherBatchParallelDims()
      1295  } else if (auto* iota = DynCast<HloIotaInstruction>(indices)) {  in GetGatherBatchParallelDims()
|
/external/llvm-project/llvm/unittests/Support/ |
D | raw_fd_stream_test.cpp | 54 TEST(raw_fd_streamTest, DynCast) { in TEST() argument
|
/external/tensorflow/tensorflow/compiler/xla/service/spmd/ |
D | schedule_aware_all_gather_cse.cc | 30 auto coll = DynCast<HloCollectiveInstruction>(hlo); in MayConsiderAsAllGather()
|
D | canonicalize_all_gather_for_cse.cc | 51 HloAllGatherInstruction* ag = DynCast<HloAllGatherInstruction>(hlo); in RunOnComputation()
|
D | spmd_partitioner_util.cc |
      1156  HloSortInstruction* sort = DynCast<HloSortInstruction>(hlo);  in GetKValueInTopKWhenPartitionSortDim()
      1165  DynCast<HloIotaInstruction>(sort->mutable_operand(1));  in GetKValueInTopKWhenPartitionSortDim()
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | cudnn_fused_conv_rewriter.cc |
      271  if (auto call = DynCast<HloCustomCallInstruction>(instr)) {  in RunFuseBiasSideActivation()
      411  if (auto call = DynCast<HloCustomCallInstruction>(instr)) {  in RunFuseClamp()
|
/external/llvm-project/lldb/source/Plugins/ExpressionParser/Clang/ |
D | ClangASTSource.cpp |
      731   TD<D2> DynCast(TD<D1> source) {  in DynCast() function
      1478  DynCast<CXXRecordDecl>(origin_base_record));  in ExtractBaseOffsets()
      1554  DynCast<const CXXRecordDecl>(origin_record));  in layoutRecordType()
      1589  DynCast<const CXXRecordDecl>(parser_record);  in layoutRecordType()
      1602  DynCast<CXXRecordDecl>(base_record);  in layoutRecordType()
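The ClangASTSource.cpp hit at line 731 is a different DynCast from the XLA helper above: judging from its signature, TD<D2> DynCast(TD<D1> source), it is a local template helper that converts a wrapped D1 pointer into the same wrapper holding a D2, which is how the ExtractBaseOffsets() and layoutRecordType() call sites above obtain CXXRecordDecl views. Below is a self-contained analogue of that shape; the Tagged wrapper, the Decl/CXXRecordDecl stand-ins, and the use of plain dynamic_cast are assumptions for illustration, not LLDB's actual types or casting machinery.

// Wrapper-preserving downcast, analogous in shape to the DynCast at line 731.
// Toy types only; not LLDB's decl wrappers.
#include <iostream>

struct Decl {
  virtual ~Decl() = default;
};
struct CXXRecordDecl : Decl {
  const char* name = "record";
};

// Toy wrapper that tags a decl pointer (e.g. with which AST it came from).
template <class D>
struct Tagged {
  explicit Tagged(D* d) : decl(d) {}
  D* decl;
};

// Downcast the wrapped pointer while keeping the wrapper template.
template <class D2, template <class> class TD, class D1>
TD<D2> DynCast(TD<D1> source) {
  return TD<D2>(dynamic_cast<D2*>(source.decl));
}

int main() {
  CXXRecordDecl record;
  Tagged<Decl> as_base(&record);

  Tagged<CXXRecordDecl> as_record = DynCast<CXXRecordDecl>(as_base);
  if (as_record.decl != nullptr)
    std::cout << as_record.decl->name << "\n";  // prints "record"
  return 0;
}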
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | dot_op_emitter.cc | 1192 if (auto* dot_instr = DynCast<HloDotInstruction>(&instr)) { in IsBatchDot()
|