Searched refs:EmitCallToIntrinsic (Results 1 – 12 of 12) sorted by relevance
/external/tensorflow/tensorflow/compiler/xla/service/gpu/

D | parallel_loop_emitter.cc
      75  llvm::Value* block_id = llvm_ir::EmitCallToIntrinsic(  in EmitIndexAndSetExitBasicBlock()
      85  llvm::Value* thread_id = llvm_ir::EmitCallToIntrinsic(  in EmitIndexAndSetExitBasicBlock()
     107  llvm_ir::EmitCallToIntrinsic(  in EmitIndexAndSetExitBasicBlock()

D | ir_emission_utils.cc
     216  return llvm_ir::EmitCallToIntrinsic(  in EmitFullWarpShuffleDown()
     233  llvm_ir::EmitCallToIntrinsic(  in EmitFullWarpShuffleDown()
     282  llvm_ir::EmitCallToIntrinsic(  in IsBlock0Thread0()
     286  llvm_ir::EmitCallToIntrinsic(  in IsBlock0Thread0()

D | elemental_ir_emitter.cc
     169  return llvm_ir::EmitCallToIntrinsic(  in EmitFloatBinaryOp()
     314  IntCast(llvm_ir::EmitCallToIntrinsic(  in EmitThreadId()
     318  IntCast(llvm_ir::EmitCallToIntrinsic(  in EmitThreadId()
     322  IntCast(llvm_ir::EmitCallToIntrinsic(  in EmitThreadId()

D | ir_emitter.cc
     210  llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_atomic_load_add_f32,  in MaybeEmitDirectAtomicOperation()

D | ir_emitter_unnested.cc
    3169  llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_barrier0, {}, {}, &b_);  in EmitKernel()
    3191  llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_barrier0, {}, {}, &b_);  in EmitKernel()
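Note: the GPU hits above all route NVVM intrinsics through the same helper: special-register reads for thread/block indices (EmitThreadId, EmitIndexAndSetExitBasicBlock, IsBlock0Thread0), warp shuffles (EmitFullWarpShuffleDown), an f32 atomic add, and the nvvm_barrier0 block barrier (EmitKernel). A minimal sketch of that call pattern follows; it assumes the helper lives in the xla::llvm_ir namespace and that the listed header paths are right, and EmitLinearThreadIdX itself is a hypothetical illustration, not a function in these files. Only the nvvm_barrier0 call with {}, {}, &builder arguments is confirmed verbatim by the hits.

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"

    // Computes block_id * block_dim + thread_id along x, then synchronizes the
    // thread block, mirroring the call sites listed above.
    llvm::Value* EmitLinearThreadIdX(llvm::IRBuilder<>* b) {
      namespace llvm_ir = xla::llvm_ir;
      // NVVM special-register reads take no operands and no overloaded types,
      // hence the two empty braced lists.
      llvm::Value* block_id = llvm_ir::EmitCallToIntrinsic(
          llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x, {}, {}, b);
      llvm::Value* block_dim = llvm_ir::EmitCallToIntrinsic(
          llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x, {}, {}, b);
      llvm::Value* thread_id = llvm_ir::EmitCallToIntrinsic(
          llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x, {}, {}, b);
      llvm::Value* linear_id =
          b->CreateAdd(b->CreateMul(block_id, block_dim), thread_id, "linear_id");
      // Block-wide barrier (the __syncthreads() equivalent emitted in EmitKernel()).
      llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_barrier0, {}, {}, b);
      return linear_id;
    }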
/external/tensorflow/tensorflow/compiler/xla/service/

D | elemental_ir_emitter.cc
     301  return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::ctlz,  in EmitIntegerUnaryOp()
     428  return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::floor,  in EmitFloatUnaryOp()
     432  return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::ceil,  in EmitFloatUnaryOp()
     436  return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fabs,  in EmitFloatUnaryOp()
     446  llvm::Value* result = llvm_ir::EmitCallToIntrinsic(  in EmitFloatUnaryOp()
     457  auto abs_value = llvm_ir::EmitCallToIntrinsic(  in EmitFloatUnaryOp()
     640  return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::sqrt, {sum_sq},  in EmitComplexUnaryOp()
     647  auto cplx_abs = llvm_ir::EmitCallToIntrinsic(  in EmitComplexUnaryOp()
    1061  return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::log, {value},  in EmitLog()
    1078  llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fabs, {value}, {type}, b_);  in EmitLog1p()
    [all …]
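Note: the target-independent elemental emitter hits follow a second pattern: the operand goes in the first braced list and its floating-point type in the second, so overloadable math intrinsics (ctlz, floor, ceil, fabs, sqrt, log, ...) are instantiated for the element type in use; line 1078 shows the {value}, {type} shape directly. A minimal sketch under the same namespace/header assumptions as the sketch above, with EmitFAbs being a hypothetical wrapper:

    // Emits fabs(value) for whatever float type |value| carries, following the
    // {operand}, {overload type} convention of the EmitFloatUnaryOp hits above.
    llvm::Value* EmitFAbs(llvm::Value* value, llvm::IRBuilder<>* b) {
      return xla::llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fabs, {value},
                                               {value->getType()}, b);
    }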
/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/

D | sort_util.cc
     152  llvm::Value* thread_id = llvm_ir::EmitCallToIntrinsic(  in EmitTiledCompareLoop()
     204  llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_barrier0, {}, {}, b);  in EmitTiledCompareLoop()
     265  llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_barrier0, {}, {}, b);  in EmitTiledCompareLoop()

D | kernel_tiling.cc
     160  llvm::Value* block_id = llvm_ir::EmitCallToIntrinsic(  in EmitBlockIndex()
     221  llvm::CallInst* thread_id_raw = llvm_ir::EmitCallToIntrinsic(  in EmitThreadYXCoordinate()

D | llvm_util.h
     104  llvm::CallInst* EmitCallToIntrinsic(

D | llvm_util.cc
      81  llvm::CallInst* EmitCallToIntrinsic(  in EmitCallToIntrinsic() function
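Note: llvm_util.h declares the helper (line 104) and llvm_util.cc defines it (line 81); the body itself is not visible in these results. A plausible sketch of what such a definition has to do, with the parameter container types, header set, and module lookup all being assumptions rather than the actual XLA code: resolve the intrinsic declaration in the builder's current module, specializing it with the overloaded types, then emit the call.

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    llvm::CallInst* EmitCallToIntrinsic(
        llvm::Intrinsic::ID intrinsic_id,
        llvm::ArrayRef<llvm::Value*> operands,         // call operands
        llvm::ArrayRef<llvm::Type*> overloaded_types,  // types that pin the overload
        llvm::IRBuilder<>* b) {
      // Look up (or insert) the intrinsic's declaration in the current module,
      // then emit a call to it at the builder's insertion point.
      llvm::Module* module = b->GetInsertBlock()->getModule();
      llvm::Function* intrinsic =
          llvm::Intrinsic::getDeclaration(module, intrinsic_id, overloaded_types);
      return b->CreateCall(intrinsic, operands);
    }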
/external/tensorflow/tensorflow/compiler/xla/service/cpu/

D | vector_support_library.cc
      94  return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::floor, {a},  in Floor()

D | ir_emitter.cc
    1483  return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::maxnum,  in MatchReductionGenerator()
    1499  return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::minnum,  in MatchReductionGenerator()
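Note: on the CPU side the same helper emits llvm.maxnum / llvm.minnum when MatchReductionGenerator() recognizes a max/min reduction. A sketch of the kind of two-operand combiner involved, with EmitMaxCombine being a hypothetical name and only the intrinsic IDs taken from the hits above:

    // Combines two partial reduction values with llvm.maxnum, overloaded on their
    // common float type; llvm::Intrinsic::minnum gives the min-reduction variant.
    llvm::Value* EmitMaxCombine(llvm::Value* lhs, llvm::Value* rhs,
                                llvm::IRBuilder<>* b) {
      return xla::llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::maxnum, {lhs, rhs},
                                               {lhs->getType()}, b);
    }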