
Searched refs:int64 (Results 1 – 25 of 3907) sorted by relevance


/external/tensorflow/tensorflow/compiler/xla/service/cpu/
runtime_conv2d.h
26 float* lhs, float* rhs, tensorflow::int64 input_batch,
27 tensorflow::int64 input_rows, tensorflow::int64 input_cols,
28 tensorflow::int64 input_channels, tensorflow::int64 kernel_rows,
29 tensorflow::int64 kernel_cols, tensorflow::int64 kernel_channels,
30 tensorflow::int64 kernel_filters, tensorflow::int64 output_rows,
31 tensorflow::int64 output_cols, tensorflow::int64 row_stride,
32 tensorflow::int64 col_stride, tensorflow::int64 padding_top,
33 tensorflow::int64 padding_bottom, tensorflow::int64 padding_left,
34 tensorflow::int64 padding_right, tensorflow::int64 lhs_row_dilation,
35 tensorflow::int64 lhs_col_dilation, tensorflow::int64 rhs_row_dilation,
[all …]
runtime_single_threaded_conv2d.h
27 tensorflow::int64 input_batch, tensorflow::int64 input_rows,
28 tensorflow::int64 input_cols, tensorflow::int64 input_channels,
29 tensorflow::int64 kernel_rows, tensorflow::int64 kernel_cols,
30 tensorflow::int64 kernel_channels, tensorflow::int64 kernel_filters,
31 tensorflow::int64 output_rows, tensorflow::int64 output_cols,
32 tensorflow::int64 row_stride, tensorflow::int64 col_stride,
33 tensorflow::int64 padding_top, tensorflow::int64 padding_bottom,
34 tensorflow::int64 padding_left, tensorflow::int64 padding_right,
35 tensorflow::int64 lhs_row_dilation, tensorflow::int64 lhs_col_dilation,
36 tensorflow::int64 rhs_row_dilation, tensorflow::int64 rhs_col_dilation);
[all …]
runtime_single_threaded_conv2d.cc
25 Eigen::half* rhs, tensorflow::int64 input_batch, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
26 tensorflow::int64 input_rows, tensorflow::int64 input_cols, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
27 tensorflow::int64 input_channels, tensorflow::int64 kernel_rows, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
28 tensorflow::int64 kernel_cols, tensorflow::int64 kernel_channels, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
29 tensorflow::int64 kernel_filters, tensorflow::int64 output_rows, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
30 tensorflow::int64 output_cols, tensorflow::int64 row_stride, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
31 tensorflow::int64 col_stride, tensorflow::int64 padding_top, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
32 tensorflow::int64 padding_bottom, tensorflow::int64 padding_left, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
33 tensorflow::int64 padding_right, tensorflow::int64 lhs_row_dilation, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
34 tensorflow::int64 lhs_col_dilation, tensorflow::int64 rhs_row_dilation, in __xla_cpu_runtime_EigenSingleThreadedConvF16()
[all …]
runtime_conv2d.cc
28 tensorflow::int64 input_batch, tensorflow::int64 input_rows, in __xla_cpu_runtime_EigenConvF32()
29 tensorflow::int64 input_cols, tensorflow::int64 input_channels, in __xla_cpu_runtime_EigenConvF32()
30 tensorflow::int64 kernel_rows, tensorflow::int64 kernel_cols, in __xla_cpu_runtime_EigenConvF32()
31 tensorflow::int64 kernel_channels, tensorflow::int64 kernel_filters, in __xla_cpu_runtime_EigenConvF32()
32 tensorflow::int64 output_rows, tensorflow::int64 output_cols, in __xla_cpu_runtime_EigenConvF32()
33 tensorflow::int64 row_stride, tensorflow::int64 col_stride, in __xla_cpu_runtime_EigenConvF32()
34 tensorflow::int64 padding_top, tensorflow::int64 padding_bottom, in __xla_cpu_runtime_EigenConvF32()
35 tensorflow::int64 padding_left, tensorflow::int64 padding_right, in __xla_cpu_runtime_EigenConvF32()
36 tensorflow::int64 lhs_row_dilation, tensorflow::int64 lhs_col_dilation, in __xla_cpu_runtime_EigenConvF32()
37 tensorflow::int64 rhs_row_dilation, tensorflow::int64 rhs_col_dilation) { in __xla_cpu_runtime_EigenConvF32()
[all …]
runtime_conv2d_mkl.cc
21 using tensorflow::int64;
31 int ToInt(int64 input) { in ToInt()
33 if (static_cast<int64>(output) != input) { in ToInt()
53 ScalarType* rhs, int64 input_batch, int64 input_rows, in MKLConvImpl()
54 int64 input_cols, int64 input_channels, int64 kernel_rows, in MKLConvImpl()
55 int64 kernel_cols, int64 kernel_channels, int64 kernel_filters, in MKLConvImpl()
56 int64 output_rows, int64 output_cols, int64 row_stride, in MKLConvImpl()
57 int64 col_stride, int64 padding_top, int64 padding_bottom, in MKLConvImpl()
58 int64 padding_left, int64 padding_right, in MKLConvImpl()
59 int64 lhs_row_dilation, int64 lhs_col_dilation, in MKLConvImpl()
[all …]
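
The ToInt helper matched above guards the int64-to-int narrowing the MKL convolution wrapper needs: it casts and then checks that the value round-trips. A minimal standalone sketch of that check, assuming nothing beyond the matched lines (the error handling here is illustrative; the real file presumably reports through TensorFlow's own error machinery):

#include <cstdint>
#include <stdexcept>

// Narrow an int64 value to int, failing loudly if the value does not
// survive the round trip. Mirrors the check visible in the snippet;
// the exception is an assumption of this sketch.
int ToInt(std::int64_t input) {
  int output = static_cast<int>(input);
  if (static_cast<std::int64_t>(output) != input) {
    throw std::out_of_range("int64 value does not fit in int");
  }
  return output;
}
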
runtime_conv2d_mkl.h
26 float* lhs, float* rhs, tensorflow::int64 input_batch,
27 tensorflow::int64 input_rows, tensorflow::int64 input_cols,
28 tensorflow::int64 input_channels, tensorflow::int64 kernel_rows,
29 tensorflow::int64 kernel_cols, tensorflow::int64 kernel_channels,
30 tensorflow::int64 kernel_filters, tensorflow::int64 output_rows,
31 tensorflow::int64 output_cols, tensorflow::int64 row_stride,
32 tensorflow::int64 col_stride, tensorflow::int64 padding_top,
33 tensorflow::int64 padding_bottom, tensorflow::int64 padding_left,
34 tensorflow::int64 padding_right, tensorflow::int64 lhs_row_dilation,
35 tensorflow::int64 lhs_col_dilation, tensorflow::int64 rhs_row_dilation,
[all …]
/external/tensorflow/tensorflow/core/kernels/
deep_conv2d.cc
48 static int64 GetDeepConvCost(int input_tile_rows, int input_tile_cols, in GetDeepConvCost()
52 const int64 input_tile_spatial_size = input_tile_rows * input_tile_cols; in GetDeepConvCost()
53 const int64 input_transform_cost = in GetDeepConvCost()
57 const int64 product_cost = input_tile_spatial_size * in_depth * out_depth; in GetDeepConvCost()
60 const int64 output_tile_spatial_size = out_tile_rows * out_tile_cols; in GetDeepConvCost()
61 const int64 output_transform_cost = in GetDeepConvCost()
65 const int64 row_tiles = (out_rows + out_tile_rows - 1) / out_tile_rows; in GetDeepConvCost()
66 const int64 col_tiles = (out_cols + out_tile_cols - 1) / out_tile_cols; in GetDeepConvCost()
67 const int64 num_tiles = row_tiles * col_tiles; in GetDeepConvCost()
74 static int64 GetDirectConvCost(int filter_rows, int filter_cols, int in_depth, in GetDirectConvCost()
[all …]
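
The GetDeepConvCost lines matched above reduce to a few products over tile sizes and a ceiling division that counts how many tiles cover the output. A sketch of only the arithmetic visible in the listing (the truncated transform-cost terms and the comparison against GetDirectConvCost are not reconstructed; the function names below are illustrative):

#include <cstdint>

// Per-tile product cost as shown in the snippet:
// input_tile_spatial_size * in_depth * out_depth.
std::int64_t ApproxPerTileProductCost(int input_tile_rows, int input_tile_cols,
                                      std::int64_t in_depth,
                                      std::int64_t out_depth) {
  const std::int64_t input_tile_spatial_size =
      static_cast<std::int64_t>(input_tile_rows) * input_tile_cols;
  return input_tile_spatial_size * in_depth * out_depth;
}

// Tile count over the output: ceiling division in rows and columns,
// matching the row_tiles / col_tiles / num_tiles lines above.
std::int64_t ApproxDeepConvTileCount(int out_rows, int out_cols,
                                     int out_tile_rows, int out_tile_cols) {
  const std::int64_t row_tiles = (out_rows + out_tile_rows - 1) / out_tile_rows;
  const std::int64_t col_tiles = (out_cols + out_tile_cols - 1) / out_tile_cols;
  return row_tiles * col_tiles;
}
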
ragged_tensor_from_variant_op_test.cc
79 const std::vector<int64> split_1 = {0, 1, 2, 3, 4, 5}; in TEST_F()
80 const std::vector<int64> split_2 = {0, 1, 2, 5, 6, 7}; in TEST_F()
83 auto encoded_variant = CreateVariantFromRagged<int, int64>( in TEST_F()
89 test::FillValues<int64>(&expected_splits_1, split_1); in TEST_F()
90 test::FillValues<int64>(&expected_splits_2, split_2); in TEST_F()
95 BuildDecodeRaggedTensorGraph<int, int64>(input_ragged_rank, in TEST_F()
100 test::ExpectTensorEqual<int64>(*GetOutput(0), expected_splits_1); in TEST_F()
101 test::ExpectTensorEqual<int64>(*GetOutput(1), expected_splits_2); in TEST_F()
106 const std::vector<int64> split_1 = {0, 1, 2, 3, 4, 5}; in TEST_F()
107 const std::vector<int64> split_2 = {0, 1, 2, 5, 6, 7}; in TEST_F()
[all …]
sparse_cross_op.cc
44 virtual int64 FeatureCount(int64 batch) const = 0;
47 virtual InternalType Feature(int64 batch, int64 n,
57 SparseTensorColumn(const Tensor& values, std::vector<int64> feature_counts, in SparseTensorColumn()
58 std::vector<int64> feature_start_indices) in SparseTensorColumn()
65 int64 FeatureCount(int64 batch) const override { in FeatureCount()
69 InternalType Feature(int64 batch, int64 n, bool strong_hash) const override;
75 std::vector<int64> feature_counts_;
76 std::vector<int64> feature_start_indices_;
84 std::vector<int64> feature_counts, in KeyedSparseTensorColumn()
85 std::vector<int64> feature_start_indices, in KeyedSparseTensorColumn()
[all …]
ragged_tensor_to_variant_op_test.cc
47 int64 num_splits = ragged_splits.size(); in BuildEncodeRaggedTensorGraph()
59 int64 splits_size = splits.size(); in BuildEncodeRaggedTensorGraph()
105 const std::vector<int64> batched_splits_1 = {0, 2, 3, 3}; in TEST_F()
106 const std::vector<int64> batched_splits_2 = {0, 0, 0, 0}; in TEST_F()
108 BuildEncodeRaggedTensorGraph<int, int64>({batched_splits_1, batched_splits_2}, in TEST_F()
115 ExpectRaggedTensorVariantEqual<int, int64>( in TEST_F()
116 CreateVariantFromRagged<int, int64>({{0, 0, 0}}, {}), in TEST_F()
118 ExpectRaggedTensorVariantEqual<int, int64>( in TEST_F()
119 CreateVariantFromRagged<int, int64>({{0, 0}}, {}), in TEST_F()
121 ExpectRaggedTensorVariantEqual<int, int64>( in TEST_F()
[all …]
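
The ragged-tensor tests above are built around row-split vectors such as {0, 2, 3, 3}: entries i and i+1 bound the values of row i, so {0, 2, 3, 3} describes rows [0, 2), [2, 3), and the empty row [3, 3). A small standalone sketch of that decoding, assuming only the standard row-splits encoding and none of the TensorFlow test helpers:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Split a flat value vector into rows according to a ragged row-splits
// vector: row i holds values[splits[i] .. splits[i+1]).
std::vector<std::vector<int>> SplitByRowSplits(
    const std::vector<int>& values, const std::vector<std::int64_t>& splits) {
  std::vector<std::vector<int>> rows;
  for (std::size_t i = 0; i + 1 < splits.size(); ++i) {
    rows.emplace_back(values.begin() + splits[i],
                      values.begin() + splits[i + 1]);
  }
  return rows;
}

int main() {
  // Splits {0, 2, 3, 3} over three values -> rows {1, 2}, {3}, {}.
  auto rows = SplitByRowSplits({1, 2, 3}, {0, 2, 3, 3});
  for (const auto& row : rows) {
    for (int v : row) std::cout << v << ' ';
    std::cout << '\n';
  }
}
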
range_sampler.h
39 explicit RangeSampler(int64 range) : range_(range) { CHECK_GT(range_, 0); } in RangeSampler()
43 virtual int64 Sample(random::SimplePhilox* rnd) const = 0;
47 virtual float Probability(int64 value) const = 0;
53 gtl::MutableArraySlice<int64> batch) const;
76 gtl::MutableArraySlice<int64> batch,
78 gtl::ArraySlice<int64> extras,
87 gtl::MutableArraySlice<int64> batch,
89 gtl::ArraySlice<int64> extras,
91 gtl::ArraySlice<int64> avoided_values) const;
97 virtual void Update(gtl::ArraySlice<int64> values) { in Update()
[all …]
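
The RangeSampler interface above pairs Sample with Probability, so a caller can ask both for a sampled value in [0, range) and for how likely that value was. For the uniform case both are trivial; a standalone sketch of that case, using std::mt19937 in place of random::SimplePhilox and not tied to the real class hierarchy:

#include <cassert>
#include <cstdint>
#include <random>

// Uniform sampler over [0, range): every value is equally likely, so
// Probability is a constant 1/range. This mirrors only the shape of the
// RangeSampler interface shown above; it is not the TensorFlow class.
class UniformRangeSampler {
 public:
  explicit UniformRangeSampler(std::int64_t range)
      : range_(range), dist_(0, range - 1) {
    assert(range > 0);  // CHECK_GT(range_, 0) in the matched constructor
  }

  std::int64_t Sample(std::mt19937* rnd) { return dist_(*rnd); }

  float Probability(std::int64_t /*value*/) const { return 1.0f / range_; }

 private:
  std::int64_t range_;
  std::uniform_int_distribution<std::int64_t> dist_;
};
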
/external/tensorflow/tensorflow/core/framework/
kernel_shape_util.h
74 Status GetWindowedOutputSize(int64 input_size, int64 filter_size, int64 stride,
75 Padding padding_type, int64* output_size,
76 int64* padding_size);
102 Status GetWindowedOutputSizeV2(int64 input_size, int64 filter_size,
103 int64 dilation_rate, int64 stride,
104 Padding padding_type, int64* output_size,
105 int64* padding_size);
114 Status GetWindowedOutputSizeVerbose(int64 input_size, int64 filter_size,
115 int64 stride, Padding padding_type,
116 int64* output_size, int64* padding_before,
[all …]
kernel_shape_util.cc
20 Status GetWindowedOutputSizeVerboseV2(int64 input_size, int64 filter_size, in GetWindowedOutputSizeVerboseV2()
21 int64 dilation_rate, int64 stride, in GetWindowedOutputSizeVerboseV2()
22 Padding padding_type, int64* output_size, in GetWindowedOutputSizeVerboseV2()
23 int64* padding_before, in GetWindowedOutputSizeVerboseV2()
24 int64* padding_after) { in GetWindowedOutputSizeVerboseV2()
34 int64 effective_filter_size = (filter_size - 1) * dilation_rate + 1; in GetWindowedOutputSizeVerboseV2()
47 const int64 padding_needed = in GetWindowedOutputSizeVerboseV2()
48 std::max(int64{0}, (*output_size - 1) * stride + in GetWindowedOutputSizeVerboseV2()
66 Status GetWindowedOutputSizeVerbose(int64 input_size, int64 filter_size, in GetWindowedOutputSizeVerbose()
67 int64 stride, Padding padding_type, in GetWindowedOutputSizeVerbose()
[all …]
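
The GetWindowedOutputSizeVerboseV2 lines above carry the core convolution shape arithmetic: an effective filter size of (filter_size - 1) * dilation_rate + 1, and for SAME padding a total padding of max(0, (output_size - 1) * stride + effective_filter_size - input_size). A condensed sketch of that arithmetic under simplifying assumptions (no Status error handling, no validation of degenerate inputs, and no split of the total padding into padding_before and padding_after):

#include <algorithm>
#include <cstdint>

// Simplified VALID/SAME output-size computation following the formulas
// visible in the snippet. Returns the output size and writes the total
// padding for SAME; VALID uses no padding.
std::int64_t WindowedOutputSize(std::int64_t input_size,
                                std::int64_t filter_size,
                                std::int64_t dilation_rate,
                                std::int64_t stride, bool same_padding,
                                std::int64_t* total_padding) {
  const std::int64_t effective_filter_size =
      (filter_size - 1) * dilation_rate + 1;
  std::int64_t output_size;
  if (same_padding) {
    output_size = (input_size + stride - 1) / stride;  // ceil(input / stride)
    *total_padding = std::max<std::int64_t>(
        0, (output_size - 1) * stride + effective_filter_size - input_size);
  } else {  // VALID
    output_size = (input_size - effective_filter_size + stride) / stride;
    *total_padding = 0;
  }
  return output_size;
}
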
/external/tensorflow/tensorflow/compiler/tf2xla/g3doc/
cpu_supported_ops.md
5 `Abs` | `T={double,float,int32,int64}`
6 `Acos` | `T={complex64,double,float,int32,int64}`
8 `Add` | `T={complex64,double,float,int32,int64}`
9 `AddN` | `T={complex64,double,float,int32,int64,uint32,uint64}`
13 `All` | `Tidx={int32,int64}`
15 `Any` | `Tidx={int32,int64}`
16 `ApproximateEqual` | `T={complex64,double,float,int32,int64,uint32,uint64}`
17 `ArgMax` | `Tidx={int32,int64}`<br>`output_type={int32,int64}`<br>`T={…
18 … | `Tidx={int32,int64}`<br>`output_type={int32,int64}`<br>`T={complex64,double…
19 `Asin` | `T={complex64,double,float,int32,int64}`
[all …]
gpu_supported_ops.md
5 `Abs` | `T={double,float,int32,int64}`
6 `Acos` | `T={complex64,double,float,int32,int64}`
8 `Add` | `T={complex64,double,float,int32,int64}`
9 `AddN` | `T={complex64,double,float,int32,int64,uint32,uint64}`
13 `All` | `Tidx={int32,int64}`
15 `Any` | `Tidx={int32,int64}`
16 `ApproximateEqual` | `T={complex64,double,float,int32,int64,uint32,uint64}`
17 … | `Tidx={int32,int64}`<br>`output_type={int32,int64}`<br>`T={complex64,double…
18 … | `Tidx={int32,int64}`<br>`output_type={int32,int64}`<br>`T={complex64,double…
19 `Asin` | `T={complex64,double,float,int32,int64}`
[all …]
/external/tensorflow/tensorflow/core/profiler/internal/
tfprof_timeline.h
38 const string& name, int64 pid, int64 tid, int64 ts);
40 void EmitPID(const string& name, int64 pid);
42 void EmitRegion(int64 ts, int64 duration, int64 pid, int64 tid,
45 void EmitFlowStart(const string& name, int64 ts, int64 pid, int64 tid,
46 int64 flow_id);
48 void EmitFlowEnd(const string& name, int64 ts, int64 pid, int64 tid,
49 int64 flow_id);
51 void EmitCounter(const string& category, const string& name, int64 pid,
52 int64 ts, const string& device, int64 bytes,
53 const std::map<int64, std::vector<string>>& tensor_mem);
[all …]
tfprof_node.h
38 std::vector<int64> ShapeProtoToVec(const TensorShapeProto& shape_pb);
40 TensorShapeProto VecToShapeProto(const std::vector<int64>& shape_vec);
49 const std::map<int64, string>* id_to_string) in Trace() argument
67 const std::map<int64, string>* id_to_string_;
70 CallStack(const CodeDef& def, const std::map<int64, string>* id_to_string) in CallStack() argument
94 int64 run_count() const { return exec_.run_count(); } in run_count()
97 int64 exec_micros() const;
99 int64 accelerator_exec_micros() const;
101 int64 cpu_exec_micros() const;
103 const std::map<string, std::vector<std::pair<int64, int64>>>& op_execs() in op_execs()
[all …]
/external/tensorflow/tensorflow/compiler/xla/service/
memory_space_assignment.h
36 int64 size;
47 AssignmentInformation* assignment_information_for_space(int64 memory_space) { in assignment_information_for_space()
62 absl::Span<const std::pair<int64, AssignmentInformation>>
73 std::vector<std::pair<int64, AssignmentInformation>> assignment_info_;
123 absl::optional<int64> operand_in_alternate_mem = absl::nullopt,
137 absl::optional<int64> operand_in_alternate_mem,
144 int64 GetScheduleEndTime() const;
190 int64 start_time,
191 int64 end_time) const = 0;
195 virtual int64 PreferredEvictionEndTime(const Shape& shape, int64 start_time,
[all …]
hlo_sharding_util.cc
47 for (int64 i = 0; i < lhs_shardings.size(); ++i) { in IsShardingMoreSpecific()
74 for (int64 i = 0; i < old.tuple_elements().size(); ++i) { in MergeSharding()
88 int64 num_devices = old.tile_assignment().num_elements(); in MergeSharding()
89 std::vector<int64> new_tile_dims; in MergeSharding()
92 for (int64 i = 0; i < to_merge->tile_assignment().num_dimensions() - 1; ++i) { in MergeSharding()
93 int64 new_dim = to_merge->tile_assignment().dim(i); in MergeSharding()
94 int64 old_dim = old.tile_assignment().dim(i); in MergeSharding()
106 int64 replication = num_devices / Product(new_tile_dims); in MergeSharding()
112 Array<int64> new_tile(new_tile_dims); in MergeSharding()
114 absl::flat_hash_map<int64, std::set<int64>> old_group_members; in MergeSharding()
[all …]
/external/tensorflow/tensorflow/core/kernels/data/
padded_batch_dataset_op_test.cc
28 PaddedBatchDatasetParams(T input_dataset_params, int64 batch_size, in PaddedBatchDatasetParams()
53 CreateTensor<int64>(TensorShape({}), {batch_size_})); in GetInputTensors()
96 int64 batch_size_;
107 /*components=*/{CreateTensor<int64>( in PaddedBatchDatasetParams1()
113 /*padded_shapes=*/{CreateTensor<int64>(TensorShape{1}, {3})}, in PaddedBatchDatasetParams1()
114 /*padded_values=*/{CreateTensor<int64>(TensorShape{}, {1})}, in PaddedBatchDatasetParams1()
126 /*components=*/CreateTensors<int64>(TensorShape{3, 2}, in PaddedBatchDatasetParams2()
130 /*components=*/CreateTensors<int64>(TensorShape{4, 1}, {{6, 7, 8, 9}}), in PaddedBatchDatasetParams2()
141 /*padded_shapes=*/{CreateTensor<int64>(TensorShape{1}, {3})}, in PaddedBatchDatasetParams2()
142 /*padded_values=*/{CreateTensor<int64>(TensorShape{}, {1})}, in PaddedBatchDatasetParams2()
[all …]
/external/tensorflow/tensorflow/core/platform/
threadpool.h
77 absl::optional<int64> cost_per_unit, in SchedulingParams()
78 absl::optional<int64> block_size) in SchedulingParams()
84 absl::optional<int64> cost_per_unit() const { return cost_per_unit_; } in cost_per_unit()
85 absl::optional<int64> block_size() const { return block_size_; } in block_size()
95 absl::optional<int64> cost_per_unit_;
99 absl::optional<int64> block_size_;
148 int NumShardsUsedByFixedBlockSizeScheduling(const int64 total,
149 const int64 block_size);
154 int NumShardsUsedByTransformRangeConcurrently(const int64 block_size,
155 const int64 total);
[all …]
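
The shard-counting declarations above split `total` work units into blocks of a given size, for which the natural count is a ceiling division. A plausible sketch of that calculation only; the real ThreadPool method may additionally consult the number of threads and handle degenerate inputs differently:

#include <cstdint>

// Number of shards needed to cover `total` units in blocks of
// `block_size`, i.e. ceil(total / block_size). Degenerate inputs
// collapse to a single shard. Illustrative, not the TensorFlow code.
int NumShardsForFixedBlockSize(std::int64_t total, std::int64_t block_size) {
  if (total <= 0 || block_size <= 0 || total <= block_size) return 1;
  return static_cast<int>((total + block_size - 1) / block_size);
}
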
/external/tensorflow/tensorflow/compiler/xla/client/
xla_builder.h
111 XlaOp(int64 handle, XlaBuilder* builder) in XlaOp()
114 int64 handle() const { return handle_; } in handle()
121 int64 handle_;
240 static constexpr int64 kConvBatchDimension = 0;
241 static constexpr int64 kConvFeatureDimension = 1;
242 static constexpr int64 kConvFirstSpatialDimension = 2;
243 static constexpr int64 kConvSecondSpatialDimension = 3;
244 static constexpr int64 kConvKernelOutputDimension = 0;
245 static constexpr int64 kConvKernelInputDimension = 1;
246 static constexpr int64 kConvKernelFirstSpatialDimension = 2;
[all …]
/external/tensorflow/tensorflow/compiler/xla/
reference_util.cc
39 for (int64 rowno = 0; rowno < input.height(); ++rowno) { in Array2DF32ToF64()
40 for (int64 colno = 0; colno < input.height(); ++colno) { in Array2DF32ToF64()
48 const Array3D<float>& lhs, const Array3D<float>& rhs, int64 kernel_stride, in ConvArray3D()
57 const Array3D<float>& lhs, const Array3D<float>& rhs, int64 kernel_stride, in ConvArray3DGeneralDimensionsDilated()
58 Padding padding, int64 lhs_dilation, int64 rhs_dilation, in ConvArray3DGeneralDimensionsDilated()
67 a4dlhs.Each([&](absl::Span<const int64> indices, float* value_ptr) { in ConvArray3DGeneralDimensionsDilated()
72 a4drhs.Each([&](absl::Span<const int64> indices, float* value_ptr) { in ConvArray3DGeneralDimensionsDilated()
87 convr4->Each([&](absl::Span<const int64> indices, float* value_ptr) { in ConvArray3DGeneralDimensionsDilated()
96 std::pair<int64, int64> kernel_stride, Padding padding) { in ConvArray4D() argument
106 std::pair<int64, int64> kernel_stride, in SeparableConvArray4D() argument
[all …]
array4d.h
57 Array4D() : Array<T>(std::vector<int64>{0, 0, 0, 0}) {} in Array4D()
60 Array4D(int64 planes, int64 depth, int64 height, int64 width) in Array4D()
61 : Array<T>(std::vector<int64>{planes, depth, height, width}) {} in Array4D()
64 Array4D(int64 planes, int64 depth, int64 height, int64 width, T value) in Array4D()
65 : Array<T>(std::vector<int64>{planes, depth, height, width}, value) {} in Array4D()
73 Array4D(int64 planes, int64 depth, int64 height, int64 width, in Array4D()
100 int64 n4() const { return this->dim(3); } in n4()
101 int64 n3() const { return this->dim(2); } in n3()
102 int64 n2() const { return this->dim(1); } in n2()
103 int64 n1() const { return this->dim(0); } in n1()
[all …]
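
The Array4D constructors above forward a {planes, depth, height, width} dimension vector to the generic Array<T> base, with n1 through n4 reading those dimensions back. A minimal standalone sketch of such a wrapper; the row-major storage layout here is an assumption of the sketch, not taken from the listing:

#include <cstdint>
#include <vector>

// Minimal 4-D array with dimensions (n1, n2, n3, n4) = (planes, depth,
// height, width) and row-major element access. Illustrative only.
template <typename T>
class SimpleArray4D {
 public:
  SimpleArray4D(std::int64_t planes, std::int64_t depth, std::int64_t height,
                std::int64_t width, T value = T())
      : dims_{planes, depth, height, width},
        data_(planes * depth * height * width, value) {}

  std::int64_t n1() const { return dims_[0]; }
  std::int64_t n2() const { return dims_[1]; }
  std::int64_t n3() const { return dims_[2]; }
  std::int64_t n4() const { return dims_[3]; }

  T& operator()(std::int64_t p, std::int64_t d, std::int64_t h,
                std::int64_t w) {
    return data_[((p * dims_[1] + d) * dims_[2] + h) * dims_[3] + w];
  }

 private:
  std::int64_t dims_[4];
  std::vector<T> data_;
};
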
/external/tensorflow/tensorflow/core/profiler/convert/
xplane_to_memory_profile_test.cc
43 {{StatType::kBytesReserved, int64{2000}}, in TEST()
44 {StatType::kBytesAllocated, int64{3000}}, in TEST()
45 {StatType::kBytesAvailable, int64{5000}}, in TEST()
46 {StatType::kPeakBytesInUse, int64{8500}}, in TEST()
47 {StatType::kRequestedBytes, int64{200}}, in TEST()
48 {StatType::kAllocationBytes, int64{256}}, in TEST()
49 {StatType::kAddress, int64{222333}}, in TEST()
50 {StatType::kStepId, int64{-93746}}, in TEST()
51 {StatType::kDataType, int64{1}}, in TEST()
59 {{StatType::kBytesReserved, int64{2000}}, in TEST()
[all …]
