/external/tensorflow/tensorflow/core/kernels/ |
D | segment_reduction_ops_impl_3.cc |
      22   name, type, index_type, initial_value_functor, reduction_functor) \   [argument]
      27   .TypeConstraint<index_type>("Tindices"), \
      29   type, index_type, \
      30   functor::UnsortedSegmentFunctor<CPUDevice, type, index_type, \
      34   #define REGISTER_REAL_CPU_UNSORTED_KERNELS(type, index_type) \   [argument]
      35   REGISTER_CPU_KERNEL_UNSORTEDSEGMENT("UnsortedSegmentSum", type, index_type, \
      38   REGISTER_CPU_KERNEL_UNSORTEDSEGMENT("UnsortedSegmentMax", type, index_type, \
      41   REGISTER_CPU_KERNEL_UNSORTEDSEGMENT("UnsortedSegmentMin", type, index_type, \
      44   REGISTER_CPU_KERNEL_UNSORTEDSEGMENT("UnsortedSegmentProd", type, index_type, \
      48   #define REGISTER_COMPLEX_CPU_UNSORTED_KERNELS(type, index_type) \   [argument]
      [all …]
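Note: lines 22-30 are the interior of a registration macro whose head and tail the index truncates. The sketch below reconstructs its likely shape from the visible fragments; the kernel class name (UnsortedSegmentReductionOp) and the exact trailing template arguments are inferred, not confirmed by the excerpt. Each (op name, element type, index type) triple becomes one REGISTER_KERNEL_BUILDER call constrained on both "T" and "Tindices".

// Hedged reconstruction of REGISTER_CPU_KERNEL_UNSORTEDSEGMENT; the op class
// name and the functor argument order past line 30 are assumptions.
#define REGISTER_CPU_KERNEL_UNSORTEDSEGMENT(                            \
    name, type, index_type, initial_value_functor, reduction_functor)   \
  REGISTER_KERNEL_BUILDER(                                              \
      Name(name)                                                        \
          .Device(DEVICE_CPU)                                           \
          .TypeConstraint<type>("T")                                    \
          .TypeConstraint<index_type>("Tindices"),                      \
      UnsortedSegmentReductionOp<                                       \
          type, index_type,                                             \
          functor::UnsortedSegmentFunctor<CPUDevice, type, index_type,  \
                                          initial_value_functor,        \
                                          reduction_functor>>)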
|
D | segment_reduction_ops_impl_4.cc |
      22   name, type, index_type, initial_value_functor, reduction_functor) \   [argument]
      27   .TypeConstraint<index_type>("Tindices"), \
      29   type, index_type, \
      30   functor::UnsortedSegmentFunctor<CPUDevice, type, index_type, \
      34   #define REGISTER_REAL_CPU_UNSORTED_KERNELS(type, index_type) \   [argument]
      35   REGISTER_CPU_KERNEL_UNSORTEDSEGMENT("UnsortedSegmentSum", type, index_type, \
      38   REGISTER_CPU_KERNEL_UNSORTEDSEGMENT("UnsortedSegmentMax", type, index_type, \
      41   REGISTER_CPU_KERNEL_UNSORTEDSEGMENT("UnsortedSegmentMin", type, index_type, \
      44   REGISTER_CPU_KERNEL_UNSORTEDSEGMENT("UnsortedSegmentProd", type, index_type, \
      48   #define REGISTER_COMPLEX_CPU_UNSORTED_KERNELS(type, index_type) \   [argument]
      [all …]
|
D | segment_reduction_ops_impl_2.cc |
      21   #define REGISTER_CPU_KERNEL_SEGMENT(name, functor, type, index_type, \   [argument]
      27   .TypeConstraint<index_type>("Tindices"), \
      28   SegmentReductionOp<CPUDevice, type, index_type, functor, default_value>)
      30   #define REGISTER_REAL_CPU_KERNELS(type, index_type) \   [argument]
      32   type, index_type, 0); \
      34   "SegmentMean", Eigen::internal::MeanReducer<type>, type, index_type, 0); \
      36   "SegmentProd", Eigen::internal::ProdReducer<type>, type, index_type, 1); \
      38   type, index_type, 0); \
      40   type, index_type, 0)
      42   #define REGISTER_COMPLEX_CPU_KERNELS(type, index_type) \   [argument]
      [all …]
|
D | segment_reduction_ops_impl_1.cc |
      69   #define REGISTER_CPU_KERNEL_SEGMENT(name, functor, type, index_type, \   [argument]
      75   .TypeConstraint<index_type>("Tindices"), \
      76   SegmentReductionOp<CPUDevice, type, index_type, functor, default_value>)
      78   #define REGISTER_REAL_CPU_KERNELS(type, index_type) \   [argument]
      80   type, index_type, 0); \
      82   "SegmentMean", Eigen::internal::MeanReducer<type>, type, index_type, 0); \
      84   "SegmentProd", Eigen::internal::ProdReducer<type>, type, index_type, 1); \
      86   type, index_type, 0); \
      88   type, index_type, 0)
      90   #define REGISTER_COMPLEX_CPU_KERNELS(type, index_type) \   [argument]
      [all …]
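Note: REGISTER_REAL_CPU_KERNELS fans one (type, index_type) pair out across the sorted segment reductions; the visible fragments (lines 80-88) show SegmentMean and SegmentProd with default values 0 and 1, plus three registrations whose op names are truncated. A hypothetical instantiation, assuming the usual int32/int64 choices for Tindices:

// Each line expands through REGISTER_CPU_KERNEL_SEGMENT into one
// REGISTER_KERNEL_BUILDER call per segment op; the trailing literal in each
// registration is the reduction's identity/default value (1 for SegmentProd,
// 0 for the others shown in the excerpt).
REGISTER_REAL_CPU_KERNELS(float, int32);
REGISTER_REAL_CPU_KERNELS(float, int64);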
|
D | one_hot_op.cc |
      129  #define REGISTER_ONE_HOT_INDEX(type, index_type) \   [argument]
      132  .TypeConstraint<index_type>("TI") \
      135  OneHotOp<CPUDevice, type, index_type>);
      174  #define REGISTER_ONE_HOT_GPU_INDEX(type, index_type) \   [argument]
      177  .TypeConstraint<index_type>("TI") \
      180  OneHotOp<GPUDevice, type, index_type>);
|
D | scatter_nd_op.cc |
      310  #define REGISTER_SCATTER_ND_KERNEL_INDEX(type, index_type, dev, name) \   [argument]
      314  .TypeConstraint<index_type>("Tindices") \
      316  ScatterNdOp<dev##Device, type, index_type>)
      318  #define REGISTER_SCATTER_ND_UPDATE_KERNEL_INDEX(type, index_type, dev, name, \   [argument]
      324  .TypeConstraint<index_type>("Tindices"), \
      325  ScatterNdUpdateOp<dev##Device, type, index_type, op>)
      327  #define REGISTER_RESOURCE_SCATTER_ND_UPDATE_KERNEL_INDEX(type, index_type, \   [argument]
      333  .TypeConstraint<index_type>("Tindices") \
      335  ScatterNdUpdateOp<dev##Device, type, index_type, op>)
      390  #define REGISTER_SCATTER_ND_TENSOR_UPDATE_TYPE_INDEX_TYPE(type, index_type, \   [argument]
      [all …]
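Note: the dev parameter is token-pasted onto Device with ##, so passing CPU or GPU selects CPUDevice or GPUDevice at preprocessing time. A hypothetical expansion of the macro at line 310 (builder-call details abbreviated):

REGISTER_SCATTER_ND_KERNEL_INDEX(float, int32, CPU, "ScatterNd");
// ...expands roughly to:
//   REGISTER_KERNEL_BUILDER(Name("ScatterNd")
//                               .Device(DEVICE_CPU)
//                               .TypeConstraint<float>("T")
//                               .TypeConstraint<int32>("Tindices"),
//                           ScatterNdOp<CPUDevice, float, int32>)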
|
D | gather_op.cc |
      189  #define REGISTER_GATHER_FULL(dev, type, index_type) \   [argument]
      193  .TypeConstraint<index_type>("Tindices"), \
      194  GatherOp<dev##Device, type, index_type>); \
      198  .TypeConstraint<index_type>("Tindices") \
      200  GatherOp<dev##Device, type, index_type>)
|
D | gather_nd_op.cc |
      52   #define REGISTER_GATHER_ND_FULL(dev, type, index_type) \   [argument]
      56   .TypeConstraint<index_type>("Tindices"), \
      57   GatherNdOp<dev##Device, type, index_type>)
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | parallel_loop_emitter.cc |   (all matches in EmitIndexAndSetExitBasicBlock())
      61   llvm::Type* index_type) {   [argument]
      74   CHECK_NE(index_type, nullptr);
      80   block_id = b_->CreateZExtOrTrunc(block_id, index_type, "block_id");
      90   thread_id = b_->CreateZExtOrTrunc(thread_id, index_type, "thread_id");
      95   index_type, launch_dimensions_.threads_per_block()),
      112  llvm::ConstantInt::get(index_type,
      120  linear_index_base, llvm::ConstantInt::get(index_type, unroll_factor_),
      127  b_->CreateAdd(linear_index_base, llvm::ConstantInt::get(index_type, i),
      136  llvm::ConstantInt::get(index_type, ShapeUtil::ElementsIn(shape_))),
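Note: the GPU emitter computes the flat element index in whatever integer type the caller passes as index_type, widening the raw 32-bit block/thread ids first so block_id * threads_per_block cannot overflow on large shapes; the unrolled copies (lines 120-127) then offset that base, and line 136 guards against indices past ElementsIn(shape_). A minimal self-contained sketch of the same arithmetic, assuming block_id and thread_id were already read from the GPU intrinsics as i32 values; it mirrors the index math only, not the exit-block plumbing:

#include <cstdint>

#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"

llvm::Value* EmitLinearIndexBase(llvm::IRBuilder<>* b, llvm::Value* block_id,
                                 llvm::Value* thread_id,
                                 llvm::Type* index_type,
                                 int64_t threads_per_block) {
  // Widen (or truncate) the ids to the caller-chosen index type first.
  block_id = b->CreateZExtOrTrunc(block_id, index_type, "block_id");
  thread_id = b->CreateZExtOrTrunc(thread_id, index_type, "thread_id");
  // linear_index_base = block_id * threads_per_block + thread_id
  return b->CreateAdd(
      b->CreateMul(block_id,
                   llvm::ConstantInt::get(index_type, threads_per_block)),
      thread_id, "linear_index_base");
}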
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | dynamic_slice_ops.cc |   (all matches in Compile())
      37   DataType index_type = ctx->InputType("indices");   [local]
      38   CHECK(index_type == DT_INT32 || index_type == DT_INT64);
      71   DataType index_type = ctx->InputType("start_indices");   [local]
      72   CHECK(index_type == DT_INT32 || index_type == DT_INT64);
      73   CHECK(index_type == ctx->InputType("size_indices"));
|
D | matrix_band_part_op.cc |   (all matches in Compile())
      54   DataType index_type = context->input_type(1);   [local]
      72   auto zero_index = XlaHelpers::Zero(builder, index_type);
      74   XlaHelpers::IntegerLiteral(builder, index_type, m),
      77   XlaHelpers::IntegerLiteral(builder, index_type, n),
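Note: the index_type literals built here (zero, m, n) feed the per-element band test that MatrixBandPart lowers to. A scalar reference for that predicate, written from the op's documented semantics rather than from this excerpt, so treat it as an assumption about what the XLA graph computes:

#include <cstdint>

// Element (i, j) of an m x n matrix survives band-parting when it lies within
// num_lower subdiagonals and num_upper superdiagonals; a negative bound keeps
// the whole triangle on that side.
bool InBand(int64_t i, int64_t j, int64_t num_lower, int64_t num_upper) {
  const bool lower_ok = num_lower < 0 || (i - j) <= num_lower;
  const bool upper_ok = num_upper < 0 || (j - i) <= num_upper;
  return lower_ok && upper_ok;
}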
|
D | gather_op.cc |
      38   DataType index_type, xla::XlaBuilder* builder,   [in XlaGather(), argument]
      216  DataType index_type = context->input_type(1);   [in XlaGatherWithBatchDimsOpImpl(), local]
      217  if (index_type != DT_INT32 && index_type != DT_INT64) {   [in XlaGatherWithBatchDimsOpImpl()]
      230  index_type, context->builder(), gather_output));   [in XlaGatherWithBatchDimsOpImpl()]
|
/external/libcxx/include/ |
D | span |
      43   using index_type = ptrdiff_t;
      51   static constexpr index_type extent = Extent;
      55   constexpr span(pointer ptr, index_type count);
      81   constexpr span<element_type, dynamic_extent> first(index_type count) const;
      82   constexpr span<element_type, dynamic_extent> last(index_type count) const;
      83   …constexpr span<element_type, dynamic_extent> subspan(index_type offset, index_type count = dynamic…
      86   constexpr index_type size() const noexcept;
      87   constexpr index_type size_bytes() const noexcept;
      91   constexpr reference operator[](index_type idx) const;
      92   constexpr reference operator()(index_type idx) const;
      [all …]
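Note: in this libc++ snapshot, span still indexes with a signed index_type (ptrdiff_t); later C++20 drafts renamed the member to size_type and made it unsigned, so current standard libraries spell it differently. A usage sketch against the synopsis above:

#include <cstddef>
#include <span>  // assumes a <span> matching the synopsis above

void demo() {
  int data[] = {1, 2, 3, 4, 5};
  std::span<int> s(data, 5);    // span(pointer, index_type count)
  auto head = s.first(2);       // first two elements
  auto tail = s.subspan(2, 3);  // offset 2, count 3
  int last = s[s.size() - 1];   // operator[](index_type)
  (void)head; (void)tail; (void)last;
}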
|
/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/ |
D | loop_emitter.cc |
      88   absl::string_view loop_name, llvm::Type* index_type) {   [in EmitIndexAndSetExitBasicBlock(), argument]
      89   CHECK_NE(index_type, nullptr);   [in EmitIndexAndSetExitBasicBlock()]
      93   return {IrArray::Index(index_type)};   [in EmitIndexAndSetExitBasicBlock()]
      110  IrArray::Index array_index(array_multi_index, shape_, index_type);   [in EmitIndexAndSetExitBasicBlock()]
      126  llvm::Type* index_type) {   [in EmitLoop(), argument]
      127  if (index_type == nullptr) {   [in EmitLoop()]
      128  index_type = b_->getInt64Ty();   [in EmitLoop()]
      132  EmitIndexAndSetExitBasicBlock(loop_name, index_type)) {   [in EmitLoop()]
|
/external/tensorflow/tensorflow/lite/testing/nnapi_tflite_zip_tests/ |
D | test_manifest.txt |
      1763  …e,constant_indices=False,dtype=tf.float32,end=[12,2,2,5],end_mask=None,index_type=tf.int32,input_s…
      1764  …e,constant_indices=False,dtype=tf.float32,end=[12,2,2,5],end_mask=None,index_type=tf.int32,input_s…
      1765  …one,constant_indices=False,dtype=tf.int32,end=[12,2,2,5],end_mask=None,index_type=tf.int32,input_s…
      1766  …one,constant_indices=False,dtype=tf.int32,end=[12,2,2,5],end_mask=None,index_type=tf.int32,input_s…
      1767  …one,constant_indices=False,dtype=tf.int64,end=[12,2,2,5],end_mask=None,index_type=tf.int32,input_s…
      1768  …one,constant_indices=False,dtype=tf.int64,end=[12,2,2,5],end_mask=None,index_type=tf.int32,input_s…
      1769  …ne,constant_indices=True,dtype=tf.float32,end=[12,2,2,5],end_mask=None,index_type=tf.int32,input_s…
      1770  …ne,constant_indices=True,dtype=tf.float32,end=[12,2,2,5],end_mask=None,index_type=tf.int32,input_s…
      1771  …None,constant_indices=True,dtype=tf.int32,end=[12,2,2,5],end_mask=None,index_type=tf.int32,input_s…
      1772  …None,constant_indices=True,dtype=tf.int32,end=[12,2,2,5],end_mask=None,index_type=tf.int32,input_s…
      [all …]
|
/external/tensorflow/tensorflow/lite/testing/op_tests/ |
D | slice.py |
      124  index_type = TF_TYPE_INFO[parameters["index_type"]][0]
      125  begin_values = np.array(parameters["begin"]).astype(index_type)
      126  size_values = np.array(parameters["size"]).astype(index_type)
      153  index_type = TF_TYPE_INFO[parameters["index_type"]][0]
      154  begin_values = np.array(parameters["begin"]).astype(index_type)
      155  size_values = np.array(parameters["size"]).astype(index_type)
|
D | strided_slice.py |
      76   index_type = TF_TYPE_INFO[parameters["index_type"]][0]
      79   begin_values = np.array(parameters["begin"]).astype(index_type)
      80   end_values = np.array(parameters["end"]).astype(index_type)
      82   np.array(parameters["strides"]).astype(index_type)
|
/external/tensorflow/tensorflow/compiler/xla/client/lib/ |
D | arithmetic.cc |
      117  PrimitiveType index_type, bool is_min) {   [in CreateMinMaxComputation(), argument]
      123  Parameter(b, 1, ShapeUtil::MakeShape(index_type, {}), "lhs_index");   [in CreateMinMaxComputation()]
      127  Parameter(b, 3, ShapeUtil::MakeShape(index_type, {}), "rhs_index");   [in CreateMinMaxComputation()]
      147  auto index_type = dimension_size <= INT32_MAX ? S32 : output_type;   [in ArgMinMax(), local]
      148  XlaOp index_init_value = Zero(builder, index_type);   [in ArgMinMax()]
      150  iota_shape.set_element_type(index_type);   [in ArgMinMax()]
      154  builder, input_shape.element_type(), index_type, is_min);   [in ArgMinMax()]
      159  if (index_type != output_type) {   [in ArgMinMax()]
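Note: line 147 is the interesting choice here. ArgMinMax indexes with S32 whenever the reduced dimension fits in int32 (a 32-bit iota and 32-bit compares are cheaper, especially on GPUs) and only otherwise falls back to the op's declared output type, converting at the end (line 159). The reduction itself carries (value, index) pairs; below is a scalar model of one comparator step, with the tie-breaking direction an assumption since it is not visible in the excerpt:

#include <cstdint>
#include <utility>

// One step of the four-parameter min/max-with-index reduction built in
// CreateMinMaxComputation: keep the (value, index) pair whose value wins.
template <typename T>
std::pair<T, int64_t> MinMaxStep(std::pair<T, int64_t> lhs,
                                 std::pair<T, int64_t> rhs, bool is_min) {
  const bool lhs_wins =
      is_min ? (lhs.first <= rhs.first) : (lhs.first >= rhs.first);
  return lhs_wins ? lhs : rhs;  // ties keep lhs here (assumed)
}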
|
/external/tensorflow/tensorflow/lite/kernels/ |
D | slice_test.cc |   (lines 36-57 in SliceOpModel())
      31   template <typename input_type, typename index_type>
      36   std::initializer_list<index_type> begin_data,
      38   std::initializer_list<index_type> size_data,
      47   AddConstInput(GetTensorType<index_type>(), begin_data, begin_shape);
      48   size_ = AddConstInput(GetTensorType<index_type>(), size_data, size_shape);
      56   PopulateTensor<index_type>(begin_, begin_data);
      57   PopulateTensor<index_type>(size_, size_data);
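Note: the two template parameters let one test body drive the Slice kernel with any (element type, index tensor type) pairing. A reduced, self-contained model of what that buys; SliceRef is a hypothetical 1-D reference, not the TFLite kernel, and size < 0 meaning "to the end" matches the Slice op's convention:

#include <cstdint>
#include <vector>

template <typename input_type, typename index_type>
std::vector<input_type> SliceRef(const std::vector<input_type>& input,
                                 index_type begin, index_type size) {
  const index_type end =
      size < 0 ? static_cast<index_type>(input.size()) : begin + size;
  return std::vector<input_type>(input.begin() + begin, input.begin() + end);
}

// Same logic, either index width:
//   SliceRef<float, int32_t>({1, 2, 3, 4}, 1, 2)   -> {2, 3}
//   SliceRef<float, int64_t>({1, 2, 3, 4}, 1, -1)  -> {2, 3, 4}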
|
/external/linux-kselftest/tools/testing/selftests/bpf/ |
D | test_btf.h |
      22   #define BTF_ARRAY_ENC(type, index_type, nr_elems) \   [argument]
      23   (type), (index_type), (nr_elems)
      24   #define BTF_TYPE_ARRAY_ENC(type, index_type, nr_elems) \   [argument]
      26   BTF_ARRAY_ENC(type, index_type, nr_elems)
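Note: BTF_ARRAY_ENC emits the three u32 fields of struct btf_array (element type id, index type id, element count), and BTF_TYPE_ARRAY_ENC prefixes them with the BTF_KIND_ARRAY type header. A hypothetical raw-BTF test blob using them; BTF_TYPE_INT_ENC and the BTF_END_RAW sentinel come from the surrounding selftest sources, and their exact spelling here is an assumption:

/* type id 1: a signed 32-bit "int"; type id 2: int[16], whose element
 * type and index type are both id 1. */
static __u32 raw_types[] = {
    BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] int     */
    BTF_TYPE_ARRAY_ENC(1, 1, 16),                  /* [2] int[16] */
    BTF_END_RAW,
};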
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | parallel_loop_emitter.cc |   (all matches in EmitIndexAndSetExitBasicBlock())
      34   llvm::Type* index_type) {   [argument]
      35   CHECK_NE(index_type, nullptr);
      74   llvm_ir::IrArray::Index array_index(array_multi_index, shape_, index_type);
|
/external/swiftshader/third_party/SPIRV-Tools/source/opt/ |
D | graphics_robust_access_pass.cpp |   (all matches in ClampIndicesForAccessChain())
      304  const auto* index_type =   [local]
      306  assert(index_type);
      307  const auto index_width = index_type->width();
      311  return replace_index(operand_index, GetValueForType(0, index_type));
      355  return replace_index(operand_index, GetValueForType(0, index_type));
      390  index_inst = WidenInteger(index_type->IsSigned(), maxval_width,
      411  const auto* index_type =   [local]
      415  assert(index_type);
      427  << index_type->width() << "bits";
      432  const auto index_width = index_type->width();
      [all …]
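Note: this pass forces every access-chain index into bounds. A constant out-of-range index is replaced outright (lines 311 and 355 substitute a typed zero), and a variable index is first widened to the width of the bound constant (line 390) and then clamped. A scalar model of the clamp the pass ultimately emits, with the exact SPIR-V instruction sequence (an extended-instruction clamp vs. compare-and-select) left as an assumption:

#include <algorithm>
#include <cstdint>

// Clamp a signed index into [0, max_index] after widening; mirrors the
// arithmetic only, not the IR rewriting.
int64_t ClampIndex(int64_t index, int64_t max_index) {
  return std::clamp<int64_t>(index, 0, max_index);
}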
|
/external/angle/third_party/spirv-tools/src/source/opt/ |
D | graphics_robust_access_pass.cpp |   (identical vendored copy; same matches as the SPIRV-Tools entry above)
|
/external/deqp-deps/SPIRV-Tools/source/opt/ |
D | graphics_robust_access_pass.cpp |   (identical vendored copy; same matches as the SPIRV-Tools entry above)
|
/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/ |
D | Fill.pbtxt |
      24   type_attr: "index_type"
      39   name: "index_type"
|