/external/tensorflow/tensorflow/lite/delegates/hexagon/builders/ |
D | reshape_builder.cc |
  41   int num_dimensions = params->num_dimensions;   in PopulateShapeFromParam() local
  42   if (num_dimensions == 1 && params->shape[0] == 0) {   in PopulateShapeFromParam()
  46   num_dimensions = 0;   in PopulateShapeFromParam()
  48   for (int i = 0; i < num_dimensions; ++i) {   in PopulateShapeFromParam()
|
/external/tensorflow/tensorflow/lite/kernels/ |
D | reshape.cc |
  92    int num_dimensions = params->num_dimensions;   in GetOutputShapeFromParam() local
  93    if (num_dimensions == 1 && params->shape[0] == 0) {   in GetOutputShapeFromParam()
  97    num_dimensions = 0;   in GetOutputShapeFromParam()
  99    TfLiteIntArray* output_shape = TfLiteIntArrayCreate(num_dimensions);   in GetOutputShapeFromParam()
  100   for (int i = 0; i < num_dimensions; ++i) {   in GetOutputShapeFromParam()
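The reshape entries above (and the Hexagon and NNAPI delegate hits elsewhere in this listing) all repeat the same legacy convention: a shape attribute of rank 1 whose only value is 0 encodes a scalar (rank-0) output. A minimal sketch of that check, with a hypothetical ReshapeParams stand-in and std::vector in place of the real TfLiteReshapeParams/TfLiteIntArray types so it compiles on its own:

    #include <vector>

    struct ReshapeParams {  // hypothetical stand-in for TfLiteReshapeParams
      int num_dimensions;
      int shape[8];
    };

    std::vector<int> OutputShapeFromParam(const ReshapeParams& params) {
      int num_dimensions = params.num_dimensions;
      // Legacy convention: a 1-D shape whose single entry is 0 means "scalar output".
      if (num_dimensions == 1 && params.shape[0] == 0) {
        num_dimensions = 0;
      }
      std::vector<int> output_shape(num_dimensions);
      for (int i = 0; i < num_dimensions; ++i) {
        output_shape[i] = params.shape[i];
      }
      return output_shape;
    }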
|
D | tile.cc |
  41   int num_dimensions) {   in MultiplyShapeDims() argument
  44   TfLiteIntArray* output_shape = TfLiteIntArrayCreate(num_dimensions);   in MultiplyShapeDims()
  45   for (int i = 0; i < num_dimensions; ++i) {   in MultiplyShapeDims()
  61   const int num_dimensions = NumDimensions(input);   in ResizeOutput() local
  63   TF_LITE_ENSURE_EQ(context, num_dimensions, num_multipliers);   in ResizeOutput()
  69   num_dimensions));   in ResizeOutput()
  74   num_dimensions));   in ResizeOutput()
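The tile.cc hits show the shape arithmetic behind MultiplyShapeDims(): each output dimension is the input dimension times its multiplier, and ResizeOutput() requires one multiplier per input dimension. A rough stand-alone sketch of that computation (std::vector standing in for TfLiteIntArray):

    #include <cstddef>
    #include <vector>

    std::vector<int> MultiplyShapeDims(const std::vector<int>& input_shape,
                                       const std::vector<int>& multipliers) {
      // Tile expects one multiplier per input dimension; the kernel enforces this
      // with TF_LITE_ENSURE_EQ(context, num_dimensions, num_multipliers).
      std::vector<int> output_shape(input_shape.size());
      for (std::size_t i = 0; i < input_shape.size(); ++i) {
        output_shape[i] = input_shape[i] * multipliers[i];
      }
      return output_shape;
    }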
|
D | topk_v2.cc |
  48   const int num_dimensions = NumDimensions(input);   in ResizeOutput() local
  53   TF_LITE_ENSURE_MSG(context, k <= input->dims->data[num_dimensions - 1],   in ResizeOutput()
  56   TfLiteIntArray* output_indexes_shape = TfLiteIntArrayCreate(num_dimensions);   in ResizeOutput()
  57   TfLiteIntArray* output_values_shape = TfLiteIntArrayCreate(num_dimensions);   in ResizeOutput()
  58   for (int i = 0; i < num_dimensions - 1; ++i) {   in ResizeOutput()
  62   output_indexes_shape->data[num_dimensions - 1] = k;   in ResizeOutput()
  63   output_values_shape->data[num_dimensions - 1] = k;   in ResizeOutput()
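The topk_v2.cc hits outline how the kernel sizes its two outputs: copy every input dimension except the last, then set the last dimension of both the values and indices shapes to k, after checking that k does not exceed the searched axis. A stand-alone sketch of that shape logic, again with std::vector in place of TfLiteIntArray:

    #include <vector>

    void TopKOutputShapes(const std::vector<int>& input_shape, int k,
                          std::vector<int>* values_shape,
                          std::vector<int>* indices_shape) {
      const int num_dimensions = static_cast<int>(input_shape.size());
      // Every dimension but the last is carried over unchanged.
      *values_shape = input_shape;
      *indices_shape = input_shape;
      // The kernel first checks k <= input_shape[num_dimensions - 1].
      (*values_shape)[num_dimensions - 1] = k;
      (*indices_shape)[num_dimensions - 1] = k;
    }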
|
D | gather.cc |
  90   const int num_dimensions =   in Prepare() local
  92   TfLiteIntArray* output_shape = TfLiteIntArrayCreate(num_dimensions);   in Prepare()
|
/external/tensorflow/tensorflow/lite/delegates/coreml/builders/ |
D | reshape_op_builder.cc |
  100   TfLiteIntArray* output_shape = TfLiteIntArrayCreate(params->num_dimensions);   in RegisterInputs()
  102   params->num_dimensions * sizeof(int));   in RegisterInputs()
  125   return params->num_dimensions == 3 || params->num_dimensions == 4;   in IsReshapeOpSupported()
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | gpu_conv_runner.cc |
  330   const int num_dimensions = window.dimensions_size();   in GetGpuConvConfig() local
  331   CHECK_LE(num_dimensions, 3) << inst_as_string;   in GetGpuConvConfig()
  337   const int effective_num_dimensions = std::max(2, num_dimensions);   in GetGpuConvConfig()
  344   CHECK_EQ(num_dimensions, dnums.input_spatial_dimensions_size())   in GetGpuConvConfig()
  346   CHECK_EQ(num_dimensions, dnums.kernel_spatial_dimensions_size())   in GetGpuConvConfig()
  348   CHECK_EQ(num_dimensions, dnums.output_spatial_dimensions_size())   in GetGpuConvConfig()
  380   for (int dim = 0; dim < num_dimensions; ++dim) {   in GetGpuConvConfig()
  394   for (int dim = 0; dim < num_dimensions; ++dim) {   in GetGpuConvConfig()
  403   for (int dim = 0; dim < num_dimensions; ++dim) {   in GetGpuConvConfig()
  422   for (int dim = 0; dim < num_dimensions; ++dim) {   in GetGpuConvConfig()
  [all …]
|
/external/tflite-support/tensorflow_lite_support/cc/task/vision/ |
D | image_classifier.cc |
  222   const int num_dimensions = output_tensor->dims->size;   in CheckAndSetOutputs() local
  223   if (num_dimensions == 4) {   in CheckAndSetOutputs()
  234   } else if (num_dimensions != 2) {   in CheckAndSetOutputs()
  241   i, num_dimensions),   in CheckAndSetOutputs()
  252   int num_classes = output_tensor->dims->data[num_dimensions - 1];   in CheckAndSetOutputs()
  268   output_tensor->dims->data[num_dimensions - 1], i,   in CheckAndSetOutputs()
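CheckAndSetOutputs() above accepts classification heads of rank 2 or rank 4 and reads the class count from the last dimension. A simplified sketch of that validation (the error handling here is an assumption; the real code also inspects the inner dimensions of a 4-D head and reports failures through the task library's status machinery):

    #include <vector>

    // Returns false for unsupported head shapes; otherwise writes the class count.
    bool GetNumClasses(const std::vector<int>& output_dims, int* num_classes) {
      const int num_dimensions = static_cast<int>(output_dims.size());
      if (num_dimensions != 2 && num_dimensions != 4) {
        return false;  // the real code emits a descriptive error instead
      }
      // The number of classes is read from the last dimension in both cases.
      *num_classes = output_dims[num_dimensions - 1];
      return true;
    }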
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | hlo_sharding.cc |
  70    if (tile_assignment_last_dim_replicate.num_dimensions() == 1 ||   in PartialTile()
  255   index.size() < tile_assignment().num_dimensions()) {   in DeviceForTileIndex()
  272   CHECK_EQ(shape.dimensions_size(), tile_assignment_.num_dimensions() - 1);   in TileOffsetForDevice()
  274   CHECK_EQ(shape.dimensions_size(), tile_assignment_.num_dimensions());   in TileOffsetForDevice()
  296   tile_assignment_.num_dimensions());   in TileLimitForDevice()
  456   tile_assignment_.num_dimensions()) {   in ValidateNonTuple()
  616   !absl::c_linear_search(dims, tile_assignment().num_dimensions() - 1));   in NumTiles()
  619   CHECK(d < tile_assignment().num_dimensions());   in NumTiles()
|
D | hlo_sharding_util.cc |
  91     new_tile_dims.reserve(to_merge->tile_assignment().num_dimensions());   in MergeSharding()
  92     for (int64 i = 0; i < to_merge->tile_assignment().num_dimensions() - 1; ++i) {   in MergeSharding()
  275    dimensions.size() < sharding.tile_assignment().num_dimensions()) {   in TransposeSharding()
  449    std::vector<int64> tile_dims(sharding.tile_assignment().num_dimensions(), 1);   in ReshapeToTileDimension()
  453    for (int64 i = 0; i < sharding.tile_assignment().num_dimensions(); ++i) {   in ReshapeToTileDimension()
  1150   CHECK_LT(dim, sharding.tile_assignment().num_dimensions());   in PartiallyReplicateTiledShardingOnDims()
  1161   sharding.tile_assignment().num_dimensions());   in PartiallyReplicateTiledShardingOnDims()
  1190   new_tile_shape.reserve(sharding.tile_assignment().num_dimensions() -   in RemoveShapeDimensions()
  1192   for (int64 i = 0; i < sharding.tile_assignment().num_dimensions(); ++i) {   in RemoveShapeDimensions()
  1213   src_to_tgt.size() < source.tile_assignment().num_dimensions()) {   in TransposeShardingWithCollapsedDims()
|
D | hlo_evaluator.cc |
  964   const int64 num_dimensions = lengths.size();   in ComputeStrides() local
  967   CHECK_EQ(num_dimensions, layout.minor_to_major_size());   in ComputeStrides()
  971   std::vector<int64> strides(num_dimensions + 1);   in ComputeStrides()
  973   for (int64 i = 0; i < num_dimensions; i++) {   in ComputeStrides()
  975   const int64 index = (num_dimensions - 1) - layout.minor_to_major(i);   in ComputeStrides()
  979   strides[num_dimensions] = stride;   in ComputeStrides()
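ComputeStrides() in hlo_evaluator.cc derives per-dimension strides from the dimension lengths and the layout's minor-to-major order, plus a trailing entry holding the total element count. The sketch below shows the general technique rather than a line-for-line copy (the real code additionally remaps indices through (num_dimensions - 1) - layout.minor_to_major(i)):

    #include <cstdint>
    #include <vector>

    std::vector<int64_t> ComputeStrides(const std::vector<int64_t>& lengths,
                                        const std::vector<int64_t>& minor_to_major) {
      const int64_t num_dimensions = static_cast<int64_t>(lengths.size());
      // One stride per dimension, plus a final slot for the total element count.
      std::vector<int64_t> strides(num_dimensions + 1);
      int64_t stride = 1;
      // Walk dimensions from most-minor to most-major, accumulating the product
      // of the lengths visited so far.
      for (int64_t i = 0; i < num_dimensions; ++i) {
        strides[minor_to_major[i]] = stride;
        stride *= lengths[minor_to_major[i]];
      }
      strides[num_dimensions] = stride;
      return strides;
    }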
|
/external/tensorflow/tensorflow/python/kernel_tests/random/ |
D | random_grad_test.py |
  52   num_dimensions = array_ops.placeholder(dtypes.int32)
  53   ret = random_grad.add_leading_unit_dimensions(x, num_dimensions)
  55   ret_val = sess.run(ret, {x: np.ones([2, 2]), num_dimensions: 2})
|
/external/tensorflow/tensorflow/lite/micro/kernels/ |
D | concatenation.cc |
  164   int num_dimensions = NumDimensions(input);   in Prepare() local
  166   if (num_dimensions > 4) {   in Prepare()
  171   num_dimensions);   in Prepare()
|
D | gather.cc |
  90   const int num_dimensions =   in Prepare() local
  92   TfLiteIntArray* output_shape = TfLiteIntArrayCreate(num_dimensions);   in Prepare()
|
/external/tensorflow/tensorflow/compiler/xla/ |
D | array.h |
  420   int64 num_dimensions() const { return sizes_.size(); }
  455   CHECK_EQ(starts.size(), num_dimensions());
  456   CHECK_EQ(limits.size(), num_dimensions());
  480   CHECK_EQ(from.num_dimensions(), num_dimensions());
|
D | array_test.cc |
  27   EXPECT_EQ(uninit.num_dimensions(), 2);   in TEST()
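array.h defines num_dimensions() as simply the length of the stored size vector, which is what the EXPECT_EQ in array_test.cc checks. The hypothetical SimpleArray below illustrates the same bookkeeping without depending on xla::Array:

    #include <cstdint>
    #include <functional>
    #include <numeric>
    #include <utility>
    #include <vector>

    class SimpleArray {  // illustrative stand-in, not xla::Array
     public:
      explicit SimpleArray(std::vector<int64_t> sizes) : sizes_(std::move(sizes)) {}

      // Rank of the array: just the number of stored dimension sizes.
      int64_t num_dimensions() const { return static_cast<int64_t>(sizes_.size()); }

      // Total element count, handy for sizing flat storage.
      int64_t num_elements() const {
        return std::accumulate(sizes_.begin(), sizes_.end(), int64_t{1},
                               std::multiplies<int64_t>());
      }

     private:
      std::vector<int64_t> sizes_;
    };

    // SimpleArray({2, 3}).num_dimensions() == 2, mirroring the test expectation above.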
|
/external/tensorflow/tensorflow/python/ops/ |
D | random_grad.py |
  33   def add_leading_unit_dimensions(x, num_dimensions):  # pylint: disable=invalid-name   argument
  35   [array_ops.ones([num_dimensions], dtype=dtypes.int32),
|
/external/tensorflow/tensorflow/compiler/xla/service/spmd/ |
D | spmd_partitioner_util.cc |
  308    int64 rank = partial_sharding.tile_assignment().num_dimensions() - 1;   in PartialReplicateReshardCompatibleSharding()
  309    int64 target_rank = target_sharding.tile_assignment().num_dimensions() -   in PartialReplicateReshardCompatibleSharding()
  635    ? sharding.tile_assignment().num_dimensions() - 1   in UniqueTiledDim()
  636    : sharding.tile_assignment().num_dimensions();   in UniqueTiledDim()
  1287   source.tile_assignment().num_dimensions() !=   in GetReshardAllToAllSourceTargetDims()
  1288   target.tile_assignment().num_dimensions() ||   in GetReshardAllToAllSourceTargetDims()
  1296   for (int64 i = 0; i < source.tile_assignment().num_dimensions(); ++i) {   in GetReshardAllToAllSourceTargetDims()
  1412   std::move(group_dim_sizes), sharding.tile_assignment().num_dimensions(),   in GroupShardingOnDims()
  1429   sharding.tile_assignment().num_dimensions()   in GroupShardingOnDims()
  1760   int64 rank = sharding.tile_assignment().num_dimensions();   in FindMatchingPartitionedDimsForGrouping()
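Many of the sharding call sites above subtract one from tile_assignment().num_dimensions(): when a sharding is partially replicated, the tile-assignment array carries one extra trailing dimension that enumerates replicas, so the data rank is the array rank minus one (see the UniqueTiledDim() hit at lines 635-636). A small sketch of that rank bookkeeping, with a stand-in TileAssignment type:

    #include <cstdint>
    #include <vector>

    struct TileAssignment {  // stand-in for the array of device ids HloSharding holds
      std::vector<int64_t> sizes;
      int64_t num_dimensions() const { return static_cast<int64_t>(sizes.size()); }
    };

    // Partially replicated shardings append one replication dimension to the tile
    // assignment, so the tensor rank is the array rank minus one in that case.
    int64_t DataRank(const TileAssignment& tile_assignment,
                     bool replicate_on_last_tile_dim) {
      return replicate_on_last_tile_dim ? tile_assignment.num_dimensions() - 1
                                        : tile_assignment.num_dimensions();
    }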
|
D | spmd_partitioner.cc |
  456    std::vector<int64> group_dims(target.tile_assignment().num_dimensions() -   in ReshardNoCache()
  997    tiling_dim_factors.reserve(target.tile_assignment().num_dimensions());   in ReshardFromPartialReplicateWithDynamicSlice()
  1106   std::vector<int64> reshape_tile_dims(temp_target_tile.num_dimensions() + 2);   in ReshardWithAllToAll()
  1110   for (int64 j = 0; j < temp_target_tile.num_dimensions(); ++j) {   in ReshardWithAllToAll()
  1125   std::vector<int64> xpose_dims(temp_target_tile.num_dimensions());   in ReshardWithAllToAll()
  1252   if (((tile_sharding.tile_assignment().num_dimensions() + 1) !=   in ReshardPartialReplicateWithAllToAll()
  1253   partial_replicate_sharding.tile_assignment().num_dimensions()) ||   in ReshardPartialReplicateWithAllToAll()
  1258   for (int i = tile_sharding.tile_assignment().num_dimensions() - 1; i >= 0;   in ReshardPartialReplicateWithAllToAll()
  3084   hlo->sharding().tile_assignment().num_dimensions() - 1);   in HandleRng()
  3550   for (int64 i = 0; i < sharding.tile_assignment().num_dimensions(); ++i) {   in AllGatherShardsInternal()
|
/external/tensorflow/tensorflow/lite/c/ |
D | builtin_op_data.h |
  334   int num_dimensions;   member
|
/external/tensorflow/tensorflow/compiler/mlir/lite/ir/ |
D | tfl_ops.cc |
  2543   ArrayRef<int64_t> output_shape, int num_dimensions,   in ComputePermutation() argument
  2548   assert(output_axis < num_dimensions);   in ComputePermutation()
  2555   const bool is_last_axis = output_axis == num_dimensions - 1;   in ComputePermutation()
  2559   ComputePermutation(input_tensor, perm, output_shape, num_dimensions,   in ComputePermutation()
  2579   const int num_dimensions = input_tensor.getType().getRank();   in fold() local
  2580   assert(perm_tensor.getType().getNumElements() == num_dimensions);   in fold()
  2587   for (int i = 0; i < num_dimensions; ++i) {   in fold()
  2599   std::vector<uint64_t> input_indices(num_dimensions);   in fold()
  2600   ComputePermutation(input_tensor, perm, output_shape, num_dimensions,   in fold()
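The tfl_ops.cc hits come from constant-folding tfl.transpose: ComputePermutation() recursively walks the output shape and copies elements from the permuted input positions. The iterative sketch below computes the same index mapping for a flat row-major buffer; it illustrates the technique and is not the MLIR fold itself:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // output_shape[d] = input_shape[perm[d]]; the input index along dimension
    // perm[d] equals the output index along dimension d.
    std::vector<float> TransposeConstant(const std::vector<float>& input,
                                         const std::vector<int64_t>& input_shape,
                                         const std::vector<int64_t>& perm) {
      const int num_dimensions = static_cast<int>(input_shape.size());
      std::vector<int64_t> output_shape(num_dimensions);
      for (int d = 0; d < num_dimensions; ++d) {
        output_shape[d] = input_shape[perm[d]];
      }
      std::vector<float> output(input.size());
      std::vector<int64_t> out_index(num_dimensions, 0);
      for (std::size_t flat = 0; flat < output.size(); ++flat) {
        // Map the current output index back to the corresponding input index.
        std::vector<int64_t> in_index(num_dimensions);
        for (int d = 0; d < num_dimensions; ++d) {
          in_index[perm[d]] = out_index[d];
        }
        // Flatten the input index in row-major order.
        int64_t in_flat = 0;
        int64_t stride = 1;
        for (int d = num_dimensions - 1; d >= 0; --d) {
          in_flat += in_index[d] * stride;
          stride *= input_shape[d];
        }
        output[flat] = input[in_flat];
        // Advance the output index in row-major order.
        for (int d = num_dimensions - 1; d >= 0; --d) {
          if (++out_index[d] < output_shape[d]) break;
          out_index[d] = 0;
        }
      }
      return output;
    }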
|
/external/tensorflow/tensorflow/stream_executor/cuda/ |
D | curand_10_0.inc |
  81   curandGenerator_t generator, unsigned int num_dimensions) {
  86   return func_ptr(generator, num_dimensions);
|
/external/tensorflow/tensorflow/lite/tools/serialization/ |
D | writer_lib_test.cc |
  359   builtin_data->num_dimensions = 3;   in TEST_P()
  360   for (int dim = 0; dim < builtin_data->num_dimensions; ++dim) {   in TEST_P()
|
/external/tensorflow/tensorflow/lite/core/api/ |
D | flatbuffer_conversions.cc |
  88     size_t num_dimensions = flat_vector->size();   in FlatBufferIntVectorToArray() local
  89     if (num_dimensions > max_size_of_buffer / sizeof(int)) {   in FlatBufferIntVectorToArray()
  96     for (size_t i = 0; i < num_dimensions; ++i) {   in FlatBufferIntVectorToArray()
  1666   params->num_dimensions = new_shape->size();   in ParseReshape()
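FlatBufferIntVectorToArray() guards the copy of a serialized int vector into a fixed-size buffer by checking the element count against the buffer capacity first; ParseReshape() then records that count in params->num_dimensions. A stand-alone sketch of the guarded copy (the real function takes a flatbuffers vector and an ErrorReporter rather than a raw pointer and a bool return):

    #include <cstddef>

    // Copies num_dimensions ints from source into buffer, refusing to overflow.
    bool IntVectorToArray(const int* source, std::size_t num_dimensions,
                          int* buffer, std::size_t max_size_of_buffer) {
      if (num_dimensions > max_size_of_buffer / sizeof(int)) {
        return false;  // the real code reports this through an error reporter
      }
      for (std::size_t i = 0; i < num_dimensions; ++i) {
        buffer[i] = source[i];
      }
      return true;
    }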
|
/external/tensorflow/tensorflow/lite/delegates/nnapi/ |
D | nnapi_delegate.cc |
  1913   int num_dimensions = params->num_dimensions;   in Validate() local
  1914   if (num_dimensions == 1 && params->shape[0] == 0) {   in Validate()
  1917   num_dimensions = 0;   in Validate()
  1919   Expect(num_dimensions > 0,   in Validate()
  2861   int num_dimensions = params->num_dimensions;   in Map() local
  2862   std::vector<int32_t> output_shape(num_dimensions);   in Map()
  2863   for (int i = 0; i < num_dimensions; ++i) {   in Map()
  2867   output_shape.data(), static_cast<uint32_t>(num_dimensions));   in Map()
|