/external/tensorflow/tensorflow/compiler/tests/
spacetobatch_op_test.py:
     30  def space_to_batch_direct(input_array, block_shape, paddings):  [argument]
     44  block_shape = np.array(block_shape)
     45  num_block_dims = len(block_shape)
     46  paddings = np.array(paddings).reshape((len(block_shape), 2))
     53  output_shape = [input_array.shape[0] * np.prod(block_shape)]
     54  for block_dim, block_shape_value in enumerate(block_shape):
    155  def _testPad(self, inputs, block_shape, paddings, outputs):  [argument]
    156  block_shape = np.array(block_shape)
    157  paddings = np.array(paddings).reshape((len(block_shape), 2))
    176  x_tf = array_ops.space_to_batch_nd(placeholder, block_shape,
    [all …]
/external/tensorflow/tensorflow/python/kernel_tests/ |
spacetobatch_op_test.py:
     34  def space_to_batch_direct(input_array, block_shape, paddings):  [argument]
     48  block_shape = np.array(block_shape)
     49  num_block_dims = len(block_shape)
     50  paddings = np.array(paddings).reshape((len(block_shape), 2))
     57  output_shape = [input_array.shape[0] * np.prod(block_shape)]
     58  for block_dim, block_shape_value in enumerate(block_shape):
    200  def _testPad(self, inputs, block_shape, paddings, outputs):  [argument]
    201  block_shape = np.array(block_shape)
    202  paddings = np.array(paddings).reshape((len(block_shape), 2))
    207  math_ops.cast(inputs, dtypes.float32), block_shape, paddings)
    [all …]
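Both spacetobatch_op_test.py files above define the same NumPy reference helper, space_to_batch_direct. The following is a minimal sketch of the contract that helper checks (pad, then split each spatial dim by its block factor, then fold the block factors into batch); it assumes plain NumPy, and the function name and pad/reshape/transpose formulation are illustrative rather than the tests' exact loop-based implementation:

    import numpy as np

    def space_to_batch_reference(input_array, block_shape, paddings):
        x = np.asarray(input_array)
        block_shape = np.asarray(block_shape)
        m = len(block_shape)
        paddings = np.asarray(paddings).reshape((m, 2))

        # Zero-pad spatial dims 1..m; batch and remaining dims are untouched.
        pad_width = ([(0, 0)] + [tuple(p) for p in paddings] +
                     [(0, 0)] * (x.ndim - 1 - m))
        padded = np.pad(x, pad_width, mode="constant")

        # Split spatial dim i into (padded_i / block_i, block_i).
        split = [padded.shape[0]]
        for i in range(m):
            split += [padded.shape[1 + i] // block_shape[i], block_shape[i]]
        split += list(padded.shape[1 + m:])
        reshaped = padded.reshape(split)

        # Move the block factors in front of batch, then flatten them into it.
        perm = ([2 * i + 2 for i in range(m)] + [0] +
                [2 * i + 1 for i in range(m)] +
                list(range(1 + 2 * m, reshaped.ndim)))
        permuted = reshaped.transpose(perm)
        out_shape = ([padded.shape[0] * int(np.prod(block_shape))] +
                     [padded.shape[1 + i] // block_shape[i] for i in range(m)] +
                     list(padded.shape[1 + m:]))
        return permuted.reshape(out_shape)

For a [1, 2, 2, 1] input with block_shape [2, 2] and zero paddings this produces a [4, 1, 1, 1] output, one batch element per block offset.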
batchtospace_op_test.py:
    139  def _testStaticShape(self, input_shape, block_shape, paddings, error):  [argument]
    140  block_shape = np.array(block_shape)
    146  np.zeros(input_shape, np.float32), block_shape, paddings)
    148  def _testDynamicShape(self, input_shape, block_shape, paddings):  [argument]
    149  block_shape = np.array(block_shape)
    155  dtypes.int32, shape=block_shape.shape)
    163  block_shape_placeholder: block_shape,
    167  def _testShape(self, input_shape, block_shape, paddings, error):  [argument]
    168  self._testStaticShape(input_shape, block_shape, paddings, error)
    169  self._testDynamicShape(input_shape, block_shape, paddings)
    [all …]
/external/tensorflow/tensorflow/core/api_def/base_api/ |
api_def_BatchToSpaceND.pbtxt:
     11  name: "block_shape"
     23  `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
     28  [block_shape[0], ..., block_shape[M-1],
     29  batch / prod(block_shape),
     33  [batch / prod(block_shape),
     35  input_shape[1], block_shape[0],
     37  input_shape[M], block_shape[M-1],
     42  [batch / prod(block_shape),
     44  input_shape[1] * block_shape[0],
     46  input_shape[M] * block_shape[M-1],
    [all …]
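The pbtxt above documents BatchToSpaceND as reshape → permute → reshape → crop. A minimal NumPy sketch of those four steps (an illustrative helper, not part of the api_def):

    import numpy as np

    def batch_to_space_reference(input_array, block_shape, crops):
        x = np.asarray(input_array)
        block_shape = np.asarray(block_shape)
        m = len(block_shape)
        crops = np.asarray(crops).reshape((m, 2))
        batch = x.shape[0] // int(np.prod(block_shape))

        # 1. Split batch into block_shape + [batch / prod(block_shape)].
        reshaped = x.reshape(list(block_shape) + [batch] + list(x.shape[1:]))

        # 2. Interleave: [batch, input_shape[1], block_shape[0], ...,
        #    input_shape[M], block_shape[M-1]] + remaining dims.
        perm = ([m] + [a for i in range(m) for a in (m + 1 + i, i)] +
                list(range(2 * m + 1, reshaped.ndim)))
        permuted = reshaped.transpose(perm)

        # 3. Merge each (input_shape[i], block_shape[i-1]) pair of dims.
        merged = permuted.reshape(
            [batch] +
            [x.shape[1 + i] * int(block_shape[i]) for i in range(m)] +
            list(x.shape[1 + m:]))

        # 4. Crop the spatial dims.
        index = [slice(None)] + [
            slice(crops[i, 0], merged.shape[1 + i] - crops[i, 1])
            for i in range(m)]
        return merged[tuple(index)]

Applied to the [4, 1, 1, 1] output of the space-to-batch sketch earlier (block_shape [2, 2], zero crops), this round-trips back to the original [1, 2, 2, 1] input.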
api_def_SpaceToBatchND.pbtxt:
     11  name: "block_shape"
     22  `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
     32  [padded_shape[1] / block_shape[0],
     33  block_shape[0],
     35  padded_shape[M] / block_shape[M-1],
     36  block_shape[M-1]] +
     42  block_shape +
     44  [padded_shape[1] / block_shape[0],
     46  padded_shape[M] / block_shape[M-1]] +
     49  4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
    [all …]
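A quick shape walkthrough of the four documented SpaceToBatchND steps for a [1, 4, 4, 1] input with block_shape = [2, 2] and zero padding (values arbitrary, plain NumPy):

    import numpy as np

    x = np.arange(16.0).reshape(1, 4, 4, 1)    # padded_shape == input shape here
    step2 = x.reshape(1, 2, 2, 2, 2, 1)        # [batch, 4/2, 2, 4/2, 2, depth]
    step3 = step2.transpose(2, 4, 0, 1, 3, 5)  # block dims first, then batch
    step4 = step3.reshape(4, 2, 2, 1)          # block_shape folded into batch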
/external/tensorflow/tensorflow/lite/kernels/ |
batch_to_space_nd.cc:
     39  block_shape = GetInput(context, node, 1);  [in BatchToSpaceNDContext()]
     44  const TfLiteTensor* block_shape;  [member]
     59  const int* block_shape = GetTensorData<int32>(op_context->block_shape);  [in ResizeOutputTensor(), local]
     62  TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->block_shape),  [in ResizeOutputTensor()]
     64  TF_LITE_ENSURE_EQ(context, op_context->block_shape->dims->data[0],  [in ResizeOutputTensor()]
     76  input_size->data[0] % (block_shape[0] * block_shape[1]), 0);  [in ResizeOutputTensor()]
     79  input_size->data[0] / (block_shape[0] * block_shape[1]);  [in ResizeOutputTensor()]
     86  input_size->data[1] * block_shape[0] - crops_top - crops_bottom;  [in ResizeOutputTensor()]
     88  input_size->data[2] * block_shape[1] - crops_left - crops_right;  [in ResizeOutputTensor()]
    110  if (!IsConstantTensor(op_context.block_shape) ||  [in Prepare()]
    [all …]
space_to_batch_nd.cc:
     39  block_shape = GetInput(context, node, 1);  [in SpaceToBatchNDContext()]
     44  const TfLiteTensor* block_shape;  [member]
     59  const int32* block_shape = GetTensorData<int32>(op_context->block_shape);  [in ResizeOutputTensor(), local]
     62  TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->block_shape),  [in ResizeOutputTensor()]
     64  TF_LITE_ENSURE_EQ(context, op_context->block_shape->dims->data[0],  [in ResizeOutputTensor()]
     76  TF_LITE_ENSURE_EQ(context, final_dim_size % block_shape[dim], 0);  [in ResizeOutputTensor()]
     77  output_size->data[dim + 1] = final_dim_size / block_shape[dim];  [in ResizeOutputTensor()]
     81  input_size->data[0] * block_shape[0] * block_shape[1];  [in ResizeOutputTensor()]
     99  if (!IsConstantTensor(op_context.block_shape) ||  [in Prepare()]
    121  GetTensorShape(op_context.block_shape), \  [in Eval()]
    [all …]
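The two TFLite kernels above compute output shapes in ResizeOutputTensor for the 4-D NHWC case. A Python sketch of that arithmetic (hypothetical helper names; the checks mirror the TF_LITE_ENSURE_EQ lines visible in the snippets):

    def space_to_batch_output_shape(input_shape, block_shape, paddings):
        batch, height, width, depth = input_shape
        (pad_top, pad_bottom), (pad_left, pad_right) = paddings
        assert (height + pad_top + pad_bottom) % block_shape[0] == 0
        assert (width + pad_left + pad_right) % block_shape[1] == 0
        return [batch * block_shape[0] * block_shape[1],
                (height + pad_top + pad_bottom) // block_shape[0],
                (width + pad_left + pad_right) // block_shape[1],
                depth]

    def batch_to_space_output_shape(input_shape, block_shape, crops):
        batch, height, width, depth = input_shape
        (crop_top, crop_bottom), (crop_left, crop_right) = crops
        assert batch % (block_shape[0] * block_shape[1]) == 0
        return [batch // (block_shape[0] * block_shape[1]),
                height * block_shape[0] - crop_top - crop_bottom,
                width * block_shape[1] - crop_left - crop_right,
                depth]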
batch_to_space_nd_test.cc:
     64  std::initializer_list<int> block_shape,  [in BatchToSpaceNDOpConstModel(), argument]
     68  block_shape_ = AddConstInput(TensorType_INT32, block_shape, {2});  [in BatchToSpaceNDOpConstModel()]
space_to_batch_nd_test.cc:
     72  std::initializer_list<int> block_shape,  [in SpaceToBatchNDOpConstModel(), argument]
     76  block_shape_ = AddConstInput(TensorType_INT32, block_shape, {2});  [in SpaceToBatchNDOpConstModel()]
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
spacetobatch_op.cc:
     26  absl::Span<const int64> block_shape,  [in SpaceToBatch(), argument]
     31  const int block_rank = block_shape.size();  [in SpaceToBatch()]
     66  block_num_elems *= block_shape[i];  [in SpaceToBatch()]
     92  OP_REQUIRES(ctx, padded_shape[1 + i] % block_shape[i] == 0,  [in SpaceToBatch()]
     96  "]=", block_shape[i]));  [in SpaceToBatch()]
     98  reshaped_padded_shape[1 + i * 2] = padded_shape[1 + i] / block_shape[i];  [in SpaceToBatch()]
     99  reshaped_padded_shape[1 + i * 2 + 1] = block_shape[i];  [in SpaceToBatch()]
    139  output_shape[1 + i] = padded_shape[1 + i] / block_shape[i];  [in SpaceToBatch()]
    153  std::vector<int64> block_shape;  [in Compile(), local]
    154  OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntVector(1, &block_shape));  [in Compile()]
    [all …]
batchtospace_op.cc:
     26  absl::Span<const int64> block_shape,  [in BatchToSpace(), argument]
     31  const int block_rank = block_shape.size();  [in BatchToSpace()]
     54  block_num_elems *= block_shape[i];  [in BatchToSpace()]
     71  std::copy(block_shape.begin(), block_shape.end(), reshaped_shape.begin());  [in BatchToSpace()]
    108  reshaped_permuted_shape[1 + i] = block_shape[i] * input_shape[1 + i];  [in BatchToSpace()]
    151  std::vector<int64> block_shape;  [in Compile(), local]
    152  OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntVector(1, &block_shape));  [in Compile()]
    158  block_shape, crops);  [in Compile()]
/external/tensorflow/tensorflow/core/kernels/ |
spacetobatch_functor.cc:
     40  const int64* space_tensor_strides, const int64* block_shape,  [in run()]
     47  batch_tensor_pos * block_shape[0] + block_offsets[0] - pad_start[0];  [in run()]
     51  space_tensor_shape + 1, space_tensor_strides + 1, block_shape + 1,  [in run()]
     71  const int64* space_tensor_strides, const int64* block_shape,  [in run()]
    104  int64 block_shape[NUM_BLOCK_DIMS];  [in operator()(), local]
    109  block_shape[block_dim] = block_shape_tensor[block_dim];  [in operator()()]
    138  block_dim > 0 ? block_index % block_shape[block_dim] : block_index;  [in operator()()]
    139  block_index /= block_shape[block_dim];  [in operator()()]
    145  space_tensor_shape, &space_tensor_strides[1], block_shape, pad_start,  [in operator()()]
spacetobatch_functor_gpu.cu.cc:
     39  int32 block_shape[NUM_BLOCK_DIMS];  [member]
     70  offset %= args.block_shape[block_dim];  [in S2B()]
     73  batch_tensor_pos[block_dim + 1] * args.block_shape[block_dim] +  [in S2B()]
     95  remaining_block_idx /= args.block_shape[block_dim];  [in S2B()]
    108  const int64 block_shape[NUM_BLOCK_DIMS],  [in operator()()]
    118  if (block_shape[block_dim] > std::numeric_limits<int32>::max()) {  [in operator()()]
    121  args.block_shape[block_dim] = block_shape[block_dim];  [in operator()()]
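The CPU and GPU functors above both walk the batch tensor and recover, for each output batch index, the original batch index plus one offset per block dimension. A Python sketch of that index arithmetic (illustrative names; positions that fall inside the padding read as zeros):

    def decompose_batch_index(out_batch, block_shape, input_batch):
        # out_batch = (row-major index over block offsets) * input_batch + n
        n = out_batch % input_batch
        block_index = out_batch // input_batch
        offsets = []
        for b in reversed(block_shape):  # peel offsets from the last block dim
            offsets.append(block_index % b)
            block_index //= b
        offsets.reverse()
        return n, offsets

    def space_position(batch_pos, dim, offsets, block_shape, pad_start):
        # Per-dimension spatial mapping, as on lines 47 and 73 above.
        return batch_pos * block_shape[dim] + offsets[dim] - pad_start[dim]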
batchtospace_op.cc:
     68  gtl::InlinedVector<int64, 4> block_shape;  [in BatchToSpaceOpCompute(), local]
     70  internal::spacetobatch::SubtleMustCopyFlat(orig_block_shape, &block_shape);  [in BatchToSpaceOpCompute()]
     79  block_shape[dim] != 1) {  [in BatchToSpaceOpCompute()]
     91  block_shape[dim] != 1) {  [in BatchToSpaceOpCompute()]
     99  block_shape_product *= block_shape[block_dim];  [in BatchToSpaceOpCompute()]
    155  const int64 block_shape_value = block_shape[block_dim];  [in BatchToSpaceOpCompute()]
    182  const int64* internal_block_shape = &block_shape[removed_prefix_block_dims];  [in BatchToSpaceOpCompute()]
spacetobatch_op.cc:
     71  gtl::InlinedVector<int64, 4> block_shape;  [in SpaceToBatchOpCompute(), local]
     73  internal::spacetobatch::SubtleMustCopyFlat(orig_block_shape, &block_shape);  [in SpaceToBatchOpCompute()]
     82  block_shape[dim] != 1) {  [in SpaceToBatchOpCompute()]
     94  block_shape[dim] != 1) {  [in SpaceToBatchOpCompute()]
    102  block_shape_product *= block_shape[block_dim];  [in SpaceToBatchOpCompute()]
    154  const int64 block_shape_value = block_shape[block_dim];  [in SpaceToBatchOpCompute()]
    184  const int64* internal_block_shape = &block_shape[removed_prefix_block_dims];  [in SpaceToBatchOpCompute()]
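Both core kernels above strip leading and trailing block dimensions that are trivial, i.e. block_shape[dim] == 1 with zero padding or cropping, before dispatching to a rank-specialized functor (hence removed_prefix_block_dims on lines 182/184). A hedged Python sketch of that normalization, with an illustrative helper name:

    def strip_trivial_block_dims(block_shape, paddings):
        """Return the interior slice of block dims that actually move data,
        plus the count of removed prefix dims."""
        def trivial(dim):
            return block_shape[dim] == 1 and tuple(paddings[dim]) == (0, 0)

        start = 0
        while start < len(block_shape) and trivial(start):
            start += 1
        end = len(block_shape)
        while end > start and trivial(end - 1):
            end -= 1
        return block_shape[start:end], paddings[start:end], start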
/external/tensorflow/tensorflow/python/ops/ |
array_ops.py:
   2604  block_shape,  [argument]
   2636  [input_shape, block_shape]):
   2639  block_shape = ops.convert_to_tensor(
   2640  block_shape, dtype=dtypes.int32, name="block_shape")
   2642  block_shape.get_shape().assert_is_fully_defined()
   2643  block_shape.get_shape().assert_has_rank(1)
   2644  num_block_dims = block_shape.get_shape().dims[0].value
   2657  const_block_shape = tensor_util.constant_value(block_shape)
   2662  block_shape = const_block_shape
   2670  pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
    [all …]
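The array_ops.py lines above come from the helper that grows the end padding until each spatial dimension is divisible by block_shape (the formula on line 2670). A NumPy sketch of that computation, assuming static shapes (the real helper also handles dynamic shapes; the function name here is illustrative):

    import numpy as np

    def required_paddings(input_spatial_shape, block_shape, base_paddings=None):
        shape = np.asarray(input_spatial_shape)
        block = np.asarray(block_shape)
        base = (np.zeros((len(block), 2), np.int64) if base_paddings is None
                else np.asarray(base_paddings))
        full_input_shape = shape + base[:, 0] + base[:, 1]
        # Smallest extra end padding making each dim divisible by its block.
        pad_end_extra = (block - full_input_shape % block) % block
        paddings = np.stack([base[:, 0], base[:, 1] + pad_end_extra], axis=1)
        # Matching crops undo the extra padding after BatchToSpaceND.
        crops = np.stack([np.zeros_like(pad_end_extra), pad_end_extra], axis=1)
        return paddings, crops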
/external/tensorflow/tensorflow/core/ops/ |
array_ops_test.cc:
   1378  Tensor block_shape = test::AsTensor<int32>({2, 3});  [in TEST(), local]
   1379  op.input_tensors[1] = &block_shape;  [in TEST()]
   1405  Tensor block_shape = test::AsTensor<int32>({0, 2});  [in TEST(), local]
   1406  op.input_tensors[1] = &block_shape;  [in TEST()]
   1412  Tensor block_shape = test::AsTensor<int32>({1, 1});  [in TEST(), local]
   1413  op.input_tensors[1] = &block_shape;  [in TEST()]
   1422  Tensor block_shape = test::AsTensor<int32>({3, 3});  [in TEST(), local]
   1423  op.input_tensors[1] = &block_shape;  [in TEST()]
   1495  Tensor block_shape = test::AsTensor<int32>({2, 3});  [in TEST(), local]
   1496  op.input_tensors[1] = &block_shape;  [in TEST()]
    [all …]
/external/tensorflow/tensorflow/tools/graph_transforms/ |
flatten_atrous.cc:
     62  Tensor block_shape =  [in FlattenAtrousConv(), local]
     64  const int32 block_height = block_shape.flat<int32>()(0);  [in FlattenAtrousConv()]
     65  const int32 block_width = block_shape.flat<int32>()(1);  [in FlattenAtrousConv()]
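flatten_atrous.cc reads block_shape off a SpaceToBatchND node to recover the dilation rate of an atrous convolution, then rewrites the SpaceToBatchND → Conv2D → BatchToSpaceND pattern as a single dilated convolution. The equivalence it relies on, sketched with public TF ops (shapes and rate chosen purely for illustration):

    import tensorflow as tf

    x = tf.random.normal([1, 8, 8, 3])
    k = tf.random.normal([3, 3, 3, 16])

    # tf.nn.atrous_conv2d expands to the SpaceToBatchND/Conv2D/BatchToSpaceND
    # pattern that the transform matches ...
    y_expanded = tf.nn.atrous_conv2d(x, k, rate=2, padding="SAME")

    # ... and should agree with one convolution using an explicit dilation.
    y_flat = tf.nn.conv2d(x, k, strides=[1, 1, 1, 1], padding="SAME",
                          dilations=[1, 2, 2, 1])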
/external/tensorflow/tensorflow/tools/api/golden/v1/ |
tensorflow.manip.pbtxt:
      5  argspec: "args=[\'input\', \'block_shape\', \'crops\', \'name\'], varargs=None, keywords=None, def…
     29  argspec: "args=[\'input\', \'block_shape\', \'paddings\', \'name\'], varargs=None, keywords=None, …
/external/tensorflow/tensorflow/lite/toco/graph_transformations/ |
resolve_batch_to_space_nd_attributes.cc:
     39  if (!op->block_shape.empty()) {  [in Run()]
     72  op->block_shape.push_back(block_shape_buffer[i]);  [in Run()]
resolve_space_to_batch_nd_attributes.cc:
     39  if (!op->block_shape.empty()) {  [in Run()]
     76  op->block_shape.push_back(block_shape_buffer[i]);  [in Run()]
/external/tensorflow/tensorflow/cc/gradients/ |
array_grad_test.cc:
    287  auto block_shape = Const(scope_, {2, 2});  [in TEST_F(), local]
    290  auto y = SpaceToBatchND(scope_, x, block_shape, paddings);  [in TEST_F()]
    306  auto block_shape = Const(scope_, {2, 2});  [in TEST_F(), local]
    309  auto y = BatchToSpaceND(scope_, x, block_shape, paddings);  [in TEST_F()]
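The gradient tests above exercise the fact that the two ops invert each other: the gradient of SpaceToBatchND is BatchToSpaceND with the same block_shape (paddings reinterpreted as crops), and vice versa. The Python-side registrations look essentially like this (a sketch of the array_grad.py pattern; treat the exact names as unverified):

    from tensorflow.python.framework import ops
    from tensorflow.python.ops import array_ops

    @ops.RegisterGradient("SpaceToBatchND")
    def _space_to_batch_nd_grad(op, grad):
      # Undo the forward op; block_shape and paddings receive no gradient.
      return [array_ops.batch_to_space_nd(grad, op.inputs[1], op.inputs[2]),
              None, None]

    @ops.RegisterGradient("BatchToSpaceND")
    def _batch_to_space_nd_grad(op, grad):
      return [array_ops.space_to_batch_nd(grad, op.inputs[1], op.inputs[2]),
              None, None]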
/external/tensorflow/tensorflow/python/ops/linalg/ |
linear_operator_circulant.py:
    193  def block_shape(self):  [member in _BaseLinearOperatorCirculant]
    219  self.block_shape.is_fully_defined()):
    223  final_shape = vec_leading_shape.concatenate(self.block_shape)
/external/tensorflow/tensorflow/python/kernel_tests/linalg/ |
linear_operator_block_diag_test.py:
     91  block_shape, dtype, force_well_conditioned=True)
     92  for block_shape in expected_blocks
linear_operator_kronecker_test.py:
    115  block_shape, dtype, force_well_conditioned=True)
    116  for block_shape in expected_factors