/external/tensorflow/tensorflow/compiler/tf2xla/g3doc/

D | cpu_supported_ops.md
    5: `Abs` | `T={double,float,int32,int64}`
    6: `Acos` | `T={complex64,double,float,int32,int64}`
    8: `Add` | `T={complex64,double,float,int32,int64}`
    9: `AddN` | `T={complex64,double,float,int32,int64,uint32,uint64}`
    13: `All` | `Tidx={int32,int64}`
    15: `Any` | `Tidx={int32,int64}`
    16: `ApproximateEqual` | `T={complex64,double,float,int32,int64,uint32,uint64}`
    17: `ArgMax` | `Tidx={int32,int64}`<br>`output_type={int32,int64}`<br>`T={…
    18: … | `Tidx={int32,int64}`<br>`output_type={int32,int64}`<br>`T={complex64,double…
    19: `Asin` | `T={complex64,double,float,int32,int64}`
    [all …]

D | gpu_supported_ops.md
    5: `Abs` | `T={double,float,int32,int64}`
    6: `Acos` | `T={complex64,double,float,int32,int64}`
    8: `Add` | `T={complex64,double,float,int32,int64}`
    9: `AddN` | `T={complex64,double,float,int32,int64,uint32,uint64}`
    13: `All` | `Tidx={int32,int64}`
    15: `Any` | `Tidx={int32,int64}`
    16: `ApproximateEqual` | `T={complex64,double,float,int32,int64,uint32,uint64}`
    17: … | `Tidx={int32,int64}`<br>`output_type={int32,int64}`<br>`T={complex64,double…
    18: … | `Tidx={int32,int64}`<br>`output_type={int32,int64}`<br>`T={complex64,double…
    19: `Asin` | `T={complex64,double,float,int32,int64}`
    [all …]

/external/tensorflow/tensorflow/core/kernels/

D | deep_conv2d.cc
    48: static int64 GetDeepConvCost(int input_tile_rows, int input_tile_cols, in GetDeepConvCost()
    52: const int64 input_tile_spatial_size = input_tile_rows * input_tile_cols; in GetDeepConvCost()
    53: const int64 input_transform_cost = in GetDeepConvCost()
    57: const int64 product_cost = input_tile_spatial_size * in_depth * out_depth; in GetDeepConvCost()
    60: const int64 output_tile_spatial_size = out_tile_rows * out_tile_cols; in GetDeepConvCost()
    61: const int64 output_transform_cost = in GetDeepConvCost()
    65: const int64 row_tiles = (out_rows + out_tile_rows - 1) / out_tile_rows; in GetDeepConvCost()
    66: const int64 col_tiles = (out_cols + out_tile_cols - 1) / out_tile_cols; in GetDeepConvCost()
    67: const int64 num_tiles = row_tiles * col_tiles; in GetDeepConvCost()
    74: static int64 GetDirectConvCost(int filter_rows, int filter_cols, int in_depth, in GetDirectConvCost()
    [all …]
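
The GetDeepConvCost matches outline a Winograd-style cost estimate: per-tile input-transform, product, and output-transform costs, scaled by the number of output tiles, where tile counts come from ceiling division so a partially covered edge still costs a full tile. A minimal sketch of that computation, with the elided transform-cost terms filled in by assumption:

```cpp
#include <cstdint>

// Illustrative reconstruction; the transform-cost expressions are guesses
// (the matches elide them), while the tiling arithmetic mirrors the matched lines.
static int64_t EstimateDeepConvCost(int input_tile_rows, int input_tile_cols,
                                    int out_tile_rows, int out_tile_cols,
                                    int in_depth, int out_depth, int out_rows,
                                    int out_cols) {
  // Per-tile work in the transformed domain.
  const int64_t input_tile_spatial_size =
      static_cast<int64_t>(input_tile_rows) * input_tile_cols;
  const int64_t input_transform_cost =  // assumed form of the elided term
      input_tile_spatial_size * input_tile_spatial_size * in_depth;
  const int64_t product_cost = input_tile_spatial_size * in_depth * out_depth;
  const int64_t output_tile_spatial_size =
      static_cast<int64_t>(out_tile_rows) * out_tile_cols;
  const int64_t output_transform_cost =  // assumed form of the elided term
      output_tile_spatial_size * output_tile_spatial_size * out_depth;
  // Ceiling division: (n + d - 1) / d tiles cover n output elements.
  const int64_t row_tiles = (out_rows + out_tile_rows - 1) / out_tile_rows;
  const int64_t col_tiles = (out_cols + out_tile_cols - 1) / out_tile_cols;
  const int64_t num_tiles = row_tiles * col_tiles;
  return num_tiles *
         (input_transform_cost + product_cost + output_transform_cost);
}
```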

D | sparse_cross_op.cc
    44: virtual int64 FeatureCount(int64 batch) const = 0;
    47: virtual InternalType Feature(int64 batch, int64 n,
    57: SparseTensorColumn(const Tensor& values, std::vector<int64> feature_counts, in SparseTensorColumn()
    58: std::vector<int64> feature_start_indices) in SparseTensorColumn()
    65: int64 FeatureCount(int64 batch) const override { in FeatureCount()
    69: InternalType Feature(int64 batch, int64 n, bool strong_hash) const override;
    75: std::vector<int64> feature_counts_;
    76: std::vector<int64> feature_start_indices_;
    84: std::vector<int64> feature_counts, in KeyedSparseTensorColumn()
    85: std::vector<int64> feature_start_indices, in KeyedSparseTensorColumn()
    [all …]
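
The matches describe a column interface answering FeatureCount/Feature queries; SparseTensorColumn's members imply flat value storage indexed through per-batch counts and start offsets. A sketch of that layout (assumed; the real class wraps a tensorflow::Tensor and also takes the strong_hash flag):

```cpp
#include <cstdint>
#include <utility>
#include <vector>

// Hypothetical flat-storage column: values for batch b live at
// [feature_start_indices_[b], feature_start_indices_[b] + feature_counts_[b]).
template <typename InternalType>
class FlatColumnSketch {
 public:
  FlatColumnSketch(std::vector<InternalType> values,
                   std::vector<int64_t> feature_counts,
                   std::vector<int64_t> feature_start_indices)
      : values_(std::move(values)),
        feature_counts_(std::move(feature_counts)),
        feature_start_indices_(std::move(feature_start_indices)) {}

  // Number of features in the given batch element.
  int64_t FeatureCount(int64_t batch) const { return feature_counts_[batch]; }

  // n-th feature of the given batch element, located via the start offset.
  InternalType Feature(int64_t batch, int64_t n) const {
    return values_[feature_start_indices_[batch] + n];
  }

 private:
  std::vector<InternalType> values_;
  std::vector<int64_t> feature_counts_;
  std::vector<int64_t> feature_start_indices_;
};
```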

D | quantized_add_op.cc
    47: int64 num_elements, T scalar_input, float scalar_input_min, in ScalarAddition()
    64: int64 num_elements, quint8 scalar_input, in ScalarAddition()
    74: const int64 input_0_int64 = in ScalarAddition()
    76: const int64 input_1_int64 = in ScalarAddition()
    80: const int64 lowest_quantized = in ScalarAddition()
    81: static_cast<int64>(Eigen::NumTraits<qint32>::lowest()); in ScalarAddition()
    82: const int64 highest_quantized = in ScalarAddition()
    83: static_cast<int64>(Eigen::NumTraits<qint32>::highest()); in ScalarAddition()
    89: int64 i = 0; in ScalarAddition()
    103: const int64 full_input_value = static_cast<int64>(full_input[i]); in ScalarAddition()
    [all …]
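
The ScalarAddition matches show the core of quantized addition: widen to int64, express both operands in the output's quantized range, and clamp the sum to qint32's representable interval before narrowing. A sketch of the inner loop under those assumptions (how the operands are requantized into the shared range is elided here, as it is in the matches):

```cpp
#include <algorithm>
#include <cstdint>
#include <limits>

// Both operands are assumed already re-expressed as int64 steps in the
// output range; int32_t limits stand in for Eigen::NumTraits<qint32>.
void ScalarAdditionSketch(const int64_t* full_input, int64_t scalar_value,
                          int64_t num_elements, int32_t* output) {
  const int64_t lowest_quantized = std::numeric_limits<int32_t>::lowest();
  const int64_t highest_quantized = std::numeric_limits<int32_t>::max();
  for (int64_t i = 0; i < num_elements; ++i) {
    const int64_t full_input_value = full_input[i];
    int64_t sum = full_input_value + scalar_value;
    // Saturate instead of wrapping when the sum leaves the qint32 range.
    sum = std::min(std::max(sum, lowest_quantized), highest_quantized);
    output[i] = static_cast<int32_t>(sum);
  }
}
```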

D | ragged_tensor_from_variant_op_test.cc
    79: const std::vector<int64> split_1 = {0, 1, 2, 3, 4, 5}; in TEST_F()
    80: const std::vector<int64> split_2 = {0, 1, 2, 5, 6, 7}; in TEST_F()
    83: auto encoded_variant = CreateVariantFromRagged<int, int64>( in TEST_F()
    89: test::FillValues<int64>(&expected_splits_1, split_1); in TEST_F()
    90: test::FillValues<int64>(&expected_splits_2, split_2); in TEST_F()
    95: BuildDecodeRaggedTensorGraph<int, int64>(input_ragged_rank, in TEST_F()
    100: test::ExpectTensorEqual<int64>(*GetOutput(0), expected_splits_1); in TEST_F()
    101: test::ExpectTensorEqual<int64>(*GetOutput(1), expected_splits_2); in TEST_F()
    106: const std::vector<int64> split_1 = {0, 1, 2, 3, 4, 5}; in TEST_F()
    107: const std::vector<int64> split_2 = {0, 1, 2, 5, 6, 7}; in TEST_F()
    [all …]

/external/tensorflow/tensorflow/compiler/xla/service/cpu/

D | runtime_conv2d.h
    26: float* lhs, float* rhs, tensorflow::int64 input_batch,
    27: tensorflow::int64 input_rows, tensorflow::int64 input_cols,
    28: tensorflow::int64 input_channels, tensorflow::int64 kernel_rows,
    29: tensorflow::int64 kernel_cols, tensorflow::int64 kernel_channels,
    30: tensorflow::int64 kernel_filters, tensorflow::int64 output_rows,
    31: tensorflow::int64 output_cols, tensorflow::int64 row_stride,
    32: tensorflow::int64 col_stride, tensorflow::int64 padding_top,
    33: tensorflow::int64 padding_bottom, tensorflow::int64 padding_left,
    34: tensorflow::int64 padding_right, tensorflow::int64 lhs_row_dilation,
    35: tensorflow::int64 lhs_col_dilation, tensorflow::int64 rhs_row_dilation,
    [all …]
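
These flat parameter lists (here and in the single-threaded variants below) encode a 2-D convolution's complete geometry, and callers must keep them mutually consistent. Per spatial dimension, the output extent follows the usual convolution arithmetic with base (lhs) and window (rhs) dilation; a sketch of that relationship, not XLA's code:

```cpp
#include <cstdint>

// Output extent of one spatial dimension, given input/kernel extents,
// stride, padding, and both dilation factors.
int64_t ConvOutputExtent(int64_t input, int64_t kernel, int64_t stride,
                         int64_t padding_low, int64_t padding_high,
                         int64_t lhs_dilation, int64_t rhs_dilation) {
  const int64_t dilated_input = (input - 1) * lhs_dilation + 1;    // base dilation
  const int64_t dilated_kernel = (kernel - 1) * rhs_dilation + 1;  // window dilation
  return (dilated_input + padding_low + padding_high - dilated_kernel) / stride + 1;
}
```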

D | runtime_single_threaded_conv2d.h
    27: tensorflow::int64 input_batch, tensorflow::int64 input_rows,
    28: tensorflow::int64 input_cols, tensorflow::int64 input_channels,
    29: tensorflow::int64 kernel_rows, tensorflow::int64 kernel_cols,
    30: tensorflow::int64 kernel_channels, tensorflow::int64 kernel_filters,
    31: tensorflow::int64 output_rows, tensorflow::int64 output_cols,
    32: tensorflow::int64 row_stride, tensorflow::int64 col_stride,
    33: tensorflow::int64 padding_top, tensorflow::int64 padding_bottom,
    34: tensorflow::int64 padding_left, tensorflow::int64 padding_right,
    35: tensorflow::int64 lhs_row_dilation, tensorflow::int64 lhs_col_dilation,
    36: tensorflow::int64 rhs_row_dilation, tensorflow::int64 rhs_col_dilation);
    [all …]

D | runtime_single_threaded_conv2d.cc
    25: Eigen::half* rhs, tensorflow::int64 input_batch, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
    26: tensorflow::int64 input_rows, tensorflow::int64 input_cols, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
    27: tensorflow::int64 input_channels, tensorflow::int64 kernel_rows, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
    28: tensorflow::int64 kernel_cols, tensorflow::int64 kernel_channels, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
    29: tensorflow::int64 kernel_filters, tensorflow::int64 output_rows, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
    30: tensorflow::int64 output_cols, tensorflow::int64 row_stride, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
    31: tensorflow::int64 col_stride, tensorflow::int64 padding_top, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
    32: tensorflow::int64 padding_bottom, tensorflow::int64 padding_left, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
    33: tensorflow::int64 padding_right, tensorflow::int64 lhs_row_dilation, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
    34: tensorflow::int64 lhs_col_dilation, tensorflow::int64 rhs_row_dilation, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
    [all …]

D | runtime_conv2d.cc
    28: tensorflow::int64 input_batch, tensorflow::int64 input_rows, in __xla_cpu_runtime_EigenConvF32()
    29: tensorflow::int64 input_cols, tensorflow::int64 input_channels, in __xla_cpu_runtime_EigenConvF32()
    30: tensorflow::int64 kernel_rows, tensorflow::int64 kernel_cols, in __xla_cpu_runtime_EigenConvF32()
    31: tensorflow::int64 kernel_channels, tensorflow::int64 kernel_filters, in __xla_cpu_runtime_EigenConvF32()
    32: tensorflow::int64 output_rows, tensorflow::int64 output_cols, in __xla_cpu_runtime_EigenConvF32()
    33: tensorflow::int64 row_stride, tensorflow::int64 col_stride, in __xla_cpu_runtime_EigenConvF32()
    34: tensorflow::int64 padding_top, tensorflow::int64 padding_bottom, in __xla_cpu_runtime_EigenConvF32()
    35: tensorflow::int64 padding_left, tensorflow::int64 padding_right, in __xla_cpu_runtime_EigenConvF32()
    36: tensorflow::int64 lhs_row_dilation, tensorflow::int64 lhs_col_dilation, in __xla_cpu_runtime_EigenConvF32()
    37: tensorflow::int64 rhs_row_dilation, tensorflow::int64 rhs_col_dilation) { in __xla_cpu_runtime_EigenConvF32()
    [all …]

D | runtime_conv2d_mkl.cc
    21: using tensorflow::int64;
    30: // Downcast an int64 to int and check if value is in range.
    31: int ToInt(int64 input) { in ToInt()
    33: if (static_cast<int64>(output) != input) { in ToInt()
    34: std::cerr << "Error occurred in downcasting int64 to int32: Value " << input in ToInt()
    53: ScalarType* rhs, int64 input_batch, int64 input_rows, in MKLConvImpl()
    54: int64 input_cols, int64 input_channels, int64 kernel_rows, in MKLConvImpl()
    55: int64 kernel_cols, int64 kernel_channels, int64 kernel_filters, in MKLConvImpl()
    56: int64 output_rows, int64 output_cols, int64 row_stride, in MKLConvImpl()
    57: int64 col_stride, int64 padding_top, int64 padding_bottom, in MKLConvImpl()
    [all …]
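
The ToInt matches show a checked narrowing helper: downcast, then round-trip to detect values outside int32 range. A completed version, with the unmatched lines filled in by assumption (including the failure behavior):

```cpp
#include <cstdint>
#include <cstdlib>
#include <iostream>

// Downcast an int64 to int and check if value is in range.
int ToInt(int64_t input) {
  int output = static_cast<int>(input);
  if (static_cast<int64_t>(output) != input) {
    std::cerr << "Error occurred in downcasting int64 to int32: Value " << input
              << " is out of range." << std::endl;
    std::exit(EXIT_FAILURE);  // assumed: the matches do not show the recovery path
  }
  return output;
}
```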

/external/tensorflow/tensorflow/core/profiler/internal/

D | tfprof_timeline.h
    38: const string& name, int64 pid, int64 tid, int64 ts);
    40: void EmitPID(const string& name, int64 pid);
    42: void EmitRegion(int64 ts, int64 duration, int64 pid, int64 tid,
    45: void EmitFlowStart(const string& name, int64 ts, int64 pid, int64 tid,
    46: int64 flow_id);
    48: void EmitFlowEnd(const string& name, int64 ts, int64 pid, int64 tid,
    49: int64 flow_id);
    51: void EmitCounter(const string& category, const string& name, int64 pid,
    52: int64 ts, const string& device, int64 bytes,
    53: const std::map<int64, std::vector<string>>& tensor_mem);
    [all …]
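
These Emit* signatures exist because tfprof's timeline is written in the Chrome trace-viewer JSON format, where every event carries name/ph/ts/pid/tid fields. A hypothetical helper showing what an EmitRegion-style "complete" event looks like on the wire:

```cpp
#include <cstdint>
#include <sstream>
#include <string>

// One Chrome trace "complete" event (ph = "X"): a named region of `duration`
// microseconds starting at `ts`, attributed to a process/thread pair.
std::string TraceRegionJson(const std::string& name, int64_t ts,
                            int64_t duration, int64_t pid, int64_t tid) {
  std::ostringstream out;
  out << "{\"name\": \"" << name << "\", \"ph\": \"X\", \"ts\": " << ts
      << ", \"dur\": " << duration << ", \"pid\": " << pid
      << ", \"tid\": " << tid << "}";
  return out.str();
}
```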

D | tfprof_node.h
    38: std::vector<int64> ShapeProtoToVec(const TensorShapeProto& shape_pb);
    40: TensorShapeProto VecToShapeProto(const std::vector<int64>& shape_vec);
    49: const std::map<int64, string>* id_to_string) in Trace() argument
    67: const std::map<int64, string>* id_to_string_;
    70: CallStack(const CodeDef& def, const std::map<int64, string>* id_to_string) in CallStack() argument
    94: int64 run_count() const { return exec_.run_count(); } in run_count()
    97: int64 exec_micros() const;
    99: int64 accelerator_exec_micros() const;
    101: int64 cpu_exec_micros() const;
    103: const std::map<string, std::vector<std::pair<int64, int64>>>& op_execs() in op_execs()
    [all …]

/external/tensorflow/tensorflow/core/profiler/

D | tfprof_output.proto
    13: repeated int64 value_int64 = 3;
    26: int64 run_count = 21;
    27: int64 exec_micros = 2;
    28: int64 accelerator_exec_micros = 17;
    29: int64 cpu_exec_micros = 18;
    32: int64 requested_bytes = 3;
    34: int64 peak_bytes = 24;
    36: int64 residual_bytes = 25;
    38: int64 output_bytes = 26;
    41: int64 parameters = 4;
    [all …]

D | profile.proto
    13: int64 drop_frames = 7;
    14: int64 keep_frames = 8;
    15: int64 time_nanos = 9;
    16: int64 duration_nanos = 10;
    18: int64 period = 12;
    19: repeated int64 comment = 13;
    20: int64 default_sample_type = 14;
    24: int64 type = 1;
    25: int64 unit = 2;
    30: repeated int64 value = 2;
    [all …]

/external/tensorflow/tensorflow/compiler/xla/

D | reference_util.cc
    39: for (int64 rowno = 0; rowno < input.height(); ++rowno) { in Array2DF32ToF64()
    40: for (int64 colno = 0; colno < input.height(); ++colno) { in Array2DF32ToF64()
    48: const Array3D<float>& lhs, const Array3D<float>& rhs, int64 kernel_stride, in ConvArray3D()
    57: const Array3D<float>& lhs, const Array3D<float>& rhs, int64 kernel_stride, in ConvArray3DGeneralDimensionsDilated()
    58: Padding padding, int64 lhs_dilation, int64 rhs_dilation, in ConvArray3DGeneralDimensionsDilated()
    67: a4dlhs.Each([&](absl::Span<const int64> indices, float* value_ptr) { in ConvArray3DGeneralDimensionsDilated()
    72: a4drhs.Each([&](absl::Span<const int64> indices, float* value_ptr) { in ConvArray3DGeneralDimensionsDilated()
    87: convr4->Each([&](absl::Span<const int64> indices, float* value_ptr) { in ConvArray3DGeneralDimensionsDilated()
    96: std::pair<int64, int64> kernel_stride, Padding padding) { in ConvArray4D() argument
    106: std::pair<int64, int64> kernel_stride, in SeparableConvArray4D() argument
    [all …]

D | reference_util.h
    48: for (int64 w = 0; w < operand.width(); ++w) { in TransposeArray2D()
    49: for (int64 h = 0; h < operand.height(); ++h) { in TransposeArray2D()
    73: std::pair<int64, int64> kernel_stride, Padding padding);
    79: std::pair<int64, int64> kernel_stride, Padding padding,
    86: std::pair<int64, int64> kernel_stride, Padding padding,
    87: std::pair<int64, int64> lhs_dilation,
    88: std::pair<int64, int64> rhs_dilation, ConvolutionDimensionNumbers dnums);
    95: int64 kernel_stride,
    100: const Array3D<float>& lhs, const Array3D<float>& rhs, int64 kernel_stride,
    101: Padding padding, int64 lhs_dilation, int64 rhs_dilation,
    [all …]

/external/tensorflow/tensorflow/compiler/xla/client/

D | xla_builder.h
    111: XlaOp(int64 handle, XlaBuilder* builder) in XlaOp()
    114: int64 handle() const { return handle_; } in handle()
    121: int64 handle_;
    240: static constexpr int64 kConvBatchDimension = 0;
    241: static constexpr int64 kConvFeatureDimension = 1;
    242: static constexpr int64 kConvFirstSpatialDimension = 2;
    243: static constexpr int64 kConvSecondSpatialDimension = 3;
    244: static constexpr int64 kConvKernelOutputDimension = 0;
    245: static constexpr int64 kConvKernelInputDimension = 1;
    246: static constexpr int64 kConvKernelFirstSpatialDimension = 2;
    [all …]

/external/tensorflow/tensorflow/core/framework/

D | kernel_shape_util.cc
    20: Status GetWindowedOutputSizeVerboseV2(int64 input_size, int64 filter_size, in GetWindowedOutputSizeVerboseV2()
    21: int64 dilation_rate, int64 stride, in GetWindowedOutputSizeVerboseV2()
    22: Padding padding_type, int64* output_size, in GetWindowedOutputSizeVerboseV2()
    23: int64* padding_before, in GetWindowedOutputSizeVerboseV2()
    24: int64* padding_after) { in GetWindowedOutputSizeVerboseV2()
    34: int64 effective_filter_size = (filter_size - 1) * dilation_rate + 1; in GetWindowedOutputSizeVerboseV2()
    47: const int64 padding_needed = in GetWindowedOutputSizeVerboseV2()
    48: std::max(int64{0}, (*output_size - 1) * stride + in GetWindowedOutputSizeVerboseV2()
    66: Status GetWindowedOutputSizeVerbose(int64 input_size, int64 filter_size, in GetWindowedOutputSizeVerbose()
    67: int64 stride, Padding padding_type, in GetWindowedOutputSizeVerbose()
    [all …]
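
The matched lines carry the two key formulas: the effective size of a dilated filter and the SAME-padding deficit. A sketch of the full computation for VALID and SAME (the real V2 function also supports EXPLICIT padding and reports errors through Status rather than a bool):

```cpp
#include <algorithm>
#include <cstdint>

enum class Padding { kValid, kSame };

bool GetWindowedOutputSizeSketch(int64_t input_size, int64_t filter_size,
                                 int64_t dilation_rate, int64_t stride,
                                 Padding padding_type, int64_t* output_size,
                                 int64_t* padding_before,
                                 int64_t* padding_after) {
  if (stride <= 0 || dilation_rate < 1) return false;
  // A dilated filter spans this many input cells end to end.
  const int64_t effective_filter_size = (filter_size - 1) * dilation_rate + 1;
  switch (padding_type) {
    case Padding::kValid:
      *output_size = (input_size - effective_filter_size + stride) / stride;
      *padding_before = *padding_after = 0;
      break;
    case Padding::kSame: {
      *output_size = (input_size + stride - 1) / stride;  // ceil(input/stride)
      const int64_t padding_needed =
          std::max(int64_t{0}, (*output_size - 1) * stride +
                                   effective_filter_size - input_size);
      // An odd total deficit puts the extra cell after the data.
      *padding_before = padding_needed / 2;
      *padding_after = padding_needed - *padding_before;
      break;
    }
  }
  return *output_size >= 0;
}
```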

/external/tensorflow/tensorflow/compiler/xla/service/

D | hlo_sharding_util.cc
    47: for (int64 i = 0; i < lhs_shardings.size(); ++i) { in IsShardingMoreSpecific()
    74: for (int64 i = 0; i < old.tuple_elements().size(); ++i) { in MergeSharding()
    88: int64 num_devices = old.tile_assignment().num_elements(); in MergeSharding()
    89: std::vector<int64> new_tile_dims; in MergeSharding()
    92: for (int64 i = 0; i < to_merge->tile_assignment().num_dimensions() - 1; ++i) { in MergeSharding()
    93: int64 new_dim = to_merge->tile_assignment().dim(i); in MergeSharding()
    94: int64 old_dim = old.tile_assignment().dim(i); in MergeSharding()
    106: int64 replication = num_devices / Product(new_tile_dims); in MergeSharding()
    112: Array<int64> new_tile(new_tile_dims); in MergeSharding()
    114: absl::flat_hash_map<int64, std::set<int64>> old_group_members; in MergeSharding()
    [all …]

D | space_to_batch_converter.cc
    71: int64 spatial_dimension_to_split, inherent_low_padding,
    116: StatusOr<std::pair<HloInstruction*, std::vector<int64>>> SplitSpace(
    118: int64& spatial_dimension_to_split, int64& activations_batch_dim,
    119: int64 high_padding, int64 low_padding, int64 spatial_split_size,
    120: int64 num_splits, bool is_backprop = false, bool is_rhs = false);
    125: HloInstruction* activations, int64 spatial_dimension_to_split,
    126: int64 activations_batch_dim, int64 high_padding, int64 low_padding,
    127: int64 spatial_split_size, int64 num_splits);
    152: HloInstruction* select_val, int64 new_batch_dim, int64 new_space_dim,
    153: int64 old_batch_dim, int64 old_space_dim);
    [all …]

D | memory_space_assignment.h
    36: int64 size;
    47: AssignmentInformation* assignment_information_for_space(int64 memory_space) { in assignment_information_for_space()
    62: absl::Span<const std::pair<int64, AssignmentInformation>>
    73: std::vector<std::pair<int64, AssignmentInformation>> assignment_info_;
    123: absl::optional<int64> operand_in_alternate_mem = absl::nullopt,
    137: absl::optional<int64> operand_in_alternate_mem,
    144: int64 GetScheduleEndTime() const;
    190: int64 start_time,
    191: int64 end_time) const = 0;
    195: virtual int64 PreferredEvictionEndTime(const Shape& shape, int64 start_time,
    [all …]

/external/tensorflow/tensorflow/compiler/xla/service/spmd/

D | spmd_partitioner_util.cc
    58: for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { in CreateConstant()
    74: for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { in CreateZero()
    95: for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { in CreateOne()
    129: for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { in EvenlyPartitions()
    140: for (int64 i = 0; i < shape.dimensions_size(); ++i) { in EvenlyPartitions()
    151: for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { in MakePartitionedShape()
    161: int64 ShapeSizeInBytes(const Shape& shape) { in ShapeSizeInBytes()
    168: int64 partition_id) { in MakeNonPaddedShapeForGivenPartition()
    171: for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { in MakeNonPaddedShapeForGivenPartition()
    190: std::vector<int64> tile_offset = in MakeNonPaddedShapeForGivenPartition()
    [all …]
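
EvenlyPartitions and MakePartitionedShape reduce to per-dimension ceiling division by the tile count, which is why uneven dimensions force padding on the last shard. The arithmetic, sketched with plain vectors standing in for xla::Shape and the tile assignment:

```cpp
#include <cstdint>
#include <vector>

// Per-partition extents: ceil(dim / tiles) along every dimension, so a
// dimension partitions evenly exactly when tiles[i] divides dims[i].
std::vector<int64_t> PartitionedDims(const std::vector<int64_t>& dims,
                                     const std::vector<int64_t>& tiles) {
  std::vector<int64_t> shard(dims.size());
  for (size_t i = 0; i < dims.size(); ++i) {
    shard[i] = (dims[i] + tiles[i] - 1) / tiles[i];  // CeilOfRatio
  }
  return shard;
}
```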

/external/tensorflow/tensorflow/core/ops/

D | sparse_ops.cc
    46: .Input("a_indices: int64")
    47: .Input("b_indices: int64")
    48: .Input("sum_indices: int64")
    63: .Input("a_indices: int64")
    65: .Input("a_shape: int64")
    66: .Input("b_indices: int64")
    68: .Input("b_shape: int64")
    70: .Output("sum_indices: int64")
    72: .Output("sum_shape: int64")
    88: .Input("a_shape: int64")
    [all …]
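
These fragments are pieces of REGISTER_OP chains; sparse ops conventionally pass a COO tensor as an (indices: int64, values: T, shape: int64) triple. For context, a complete registration in that style (illustrative, not SparseAdd's actual definition, and a real op would install a meaningful shape function):

```cpp
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"

// Hypothetical op taking two COO sparse tensors and emitting their sum.
REGISTER_OP("MySparseAddSketch")
    .Input("a_indices: int64")  // [nnz_a, ndims] coordinates
    .Input("a_values: T")
    .Input("a_shape: int64")    // dense shape, length ndims
    .Input("b_indices: int64")
    .Input("b_values: T")
    .Input("b_shape: int64")
    .Output("sum_indices: int64")
    .Output("sum_values: T")
    .Output("sum_shape: int64")
    .Attr("T: numbertype")
    .SetShapeFn(tensorflow::shape_inference::UnknownShape);
```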

/external/tensorflow/tensorflow/core/kernels/image/

D | scale_and_translate_op_test.cc
    98: const int64 in_height = images.dimension(1); in Sample()
    99: const int64 in_width = images.dimension(2); in Sample()
    101: const int64 y_span_start = Clamp( in Sample()
    102: static_cast<int64>(0), in_height - 1, in Sample()
    103: static_cast<int64>( in Sample()
    105: const int64 y_span_end = in Sample()
    106: Clamp(static_cast<int64>(0), in_height - 1, in Sample()
    107: static_cast<int64>(std::floor( in Sample()
    110: const int64 x_span_start = Clamp( in Sample()
    111: static_cast<int64>(0), in_width - 1, in Sample()
    [all …]
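
The Sample helper's pattern: map an output coordinate back into input space, take ceil/floor offsets of the kernel radius to get the contributing span, and clamp both ends to the image bounds (note the test's Clamp takes low, high, value in that order). A sketch under assumed sampling geometry, since the actual scale/translate mapping is not in the matches:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

int64_t Clamp(int64_t low, int64_t high, int64_t value) {
  return std::min(high, std::max(low, value));
}

// Contributing input span [span_start, span_end] for one output sample.
void SampleSpan(float out_coord, float inv_scale, float translate, float radius,
                int64_t in_size, int64_t* span_start, int64_t* span_end) {
  // Kernel center in input coordinates (assumed half-pixel-centered mapping).
  const float in_center = (out_coord + 0.5f) * inv_scale + translate;
  *span_start = Clamp(int64_t{0}, in_size - 1,
                      static_cast<int64_t>(std::ceil(in_center - radius)));
  *span_end = Clamp(int64_t{0}, in_size - 1,
                    static_cast<int64_t>(std::floor(in_center + radius)));
}
```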