/external/tensorflow/tensorflow/contrib/rnn/kernels/ |
D | gru_ops.cc |
    52   const int64 batch_size = x_tensor->dim_size(0);  in Compute()
    53   const int64 input_size = x_tensor->dim_size(1);  in Compute()
    54   const int64 cell_size = h_prev_tensor->dim_size(1);  in Compute()
    59   OP_REQUIRES(ctx, h_prev_tensor->dim_size(0) == batch_size,  in Compute()
    61   h_prev_tensor->dim_size(0), " vs. ",  in Compute()
    63   OP_REQUIRES(ctx, h_prev_tensor->dim_size(1) == cell_size,  in Compute()
    65   "h_prev.dims(1) != cell_size: ", h_prev_tensor->dim_size(1),  in Compute()
    69   OP_REQUIRES(ctx, w_ru_tensor->dim_size(0) == input_size + cell_size,  in Compute()
    72   w_ru_tensor->dim_size(0), " vs. ", input_size + cell_size));  in Compute()
    74   OP_REQUIRES(ctx, w_ru_tensor->dim_size(1) == cell_size * 2,  in Compute()
    [all …]
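The GRU and LSTM kernels in this directory all follow the same pattern: read the batch, input, and cell sizes from dim_size(), then guard every downstream assumption with OP_REQUIRES. A condensed sketch of that pattern (a free helper with the same tensor names, not the actual gru_ops.cc code):

    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/lib/core/errors.h"

    namespace tensorflow {

    // Sketch: x is [batch, input], h_prev is [batch, cell], and the fused
    // reset/update weight matrix is [input + cell, 2 * cell].
    void ValidateRecurrentShapes(OpKernelContext* ctx, const Tensor& x,
                                 const Tensor& h_prev, const Tensor& w_ru) {
      const int64 batch_size = x.dim_size(0);
      const int64 input_size = x.dim_size(1);
      const int64 cell_size = h_prev.dim_size(1);

      OP_REQUIRES(ctx, h_prev.dim_size(0) == batch_size,
                  errors::InvalidArgument("h_prev.dims(0) != batch_size: ",
                                          h_prev.dim_size(0), " vs. ",
                                          batch_size));
      OP_REQUIRES(ctx, w_ru.dim_size(0) == input_size + cell_size,
                  errors::InvalidArgument(
                      "w_ru.dim_size(0) != input_size + cell_size: ",
                      w_ru.dim_size(0), " vs. ", input_size + cell_size));
      OP_REQUIRES(ctx, w_ru.dim_size(1) == cell_size * 2,
                  errors::InvalidArgument("w_ru.dim_size(1) != cell_size * 2"));
    }

    }  // namespace tensorflow

OP_REQUIRES returns from the calling function on failure, which is why these checks appear at the very top of Compute() before any dimension is used.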
|
D | lstm_ops.cc |
    268  const int64 batch_size = x_tensor->dim_size(0);  in Compute()
    269  const int64 input_size = x_tensor->dim_size(1);  in Compute()
    270  const int64 cell_size = cs_prev_tensor->dim_size(1);  in Compute()
    273  OP_REQUIRES(ctx, cs_prev_tensor->dim_size(0) == batch_size,  in Compute()
    275  cs_prev_tensor->dim_size(0), " vs. ",  in Compute()
    277  OP_REQUIRES(ctx, cs_prev_tensor->dim_size(1) == cell_size,  in Compute()
    279  cs_prev_tensor->dim_size(1), " vs. ",  in Compute()
    282  OP_REQUIRES(ctx, h_prev_tensor->dim_size(0) == batch_size,  in Compute()
    284  h_prev_tensor->dim_size(0), " vs. ",  in Compute()
    286  OP_REQUIRES(ctx, h_prev_tensor->dim_size(1) == cell_size,  in Compute()
    [all …]
|
/external/tensorflow/tensorflow/core/framework/ |
D | partial_tensor_shape_test.cc |
    37   EXPECT_EQ(10, s.dim_size(0));  in TEST()
    38   EXPECT_EQ(5, s.dim_size(1));  in TEST()
    43   EXPECT_EQ(10, s1.dim_size(0));  in TEST()
    44   EXPECT_EQ(5, s1.dim_size(1));  in TEST()
    45   EXPECT_EQ(10, s1.dim_size(2));  in TEST()
    46   EXPECT_EQ(5, s1.dim_size(3));  in TEST()
    53   EXPECT_EQ(10, s2.dim_size(0));  in TEST()
    54   EXPECT_EQ(10, s3.dim_size(0));  in TEST()
    55   EXPECT_EQ(5, s2.dim_size(1));  in TEST()
    56   EXPECT_EQ(5, s3.dim_size(1));  in TEST()
    [all …]
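These tests read back individual extents of a PartialTensorShape after construction and concatenation. A small illustrative test in the same style (the shape values and variable names here are my own, not the ones from the file):

    #include "tensorflow/core/framework/partial_tensor_shape.h"
    #include "tensorflow/core/platform/test.h"

    namespace tensorflow {

    TEST(PartialTensorShapeSketch, DimSize) {
      // A fully known 2-D shape: dim_size() returns the concrete extents.
      PartialTensorShape s({10, 5});
      EXPECT_EQ(2, s.dims());
      EXPECT_EQ(10, s.dim_size(0));
      EXPECT_EQ(5, s.dim_size(1));

      // Concatenate appends dimensions; an unknown dimension reads back as -1.
      const PartialTensorShape s1 = s.Concatenate(PartialTensorShape({-1}));
      EXPECT_EQ(3, s1.dims());
      EXPECT_EQ(-1, s1.dim_size(2));
    }

    }  // namespace tensorflow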
|
/external/tensorflow/tensorflow/core/kernels/neon/ |
D | neon_depthwise_conv_op.cc |
    73   const int32 in_depth = input.dim_size(3);  in Compute()
    74   OP_REQUIRES(context, in_depth == filter.dim_size(2),  in Compute()
    77   " vs ", filter.dim_size(2)));  in Compute()
    78   const int32 batch = input.dim_size(0);  in Compute()
    79   const int32 input_rows = input.dim_size(1);  in Compute()
    80   const int32 input_cols = input.dim_size(2);  in Compute()
    82   const int32 filter_rows = filter.dim_size(0);  in Compute()
    83   const int32 filter_cols = filter.dim_size(1);  in Compute()
    84   const int32 depth_multiplier = filter.dim_size(3);  in Compute()
    155  result.sizes[0] = input.dim_size(3);  in ToNeonDims()
    [all …]
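The indices used here come from TensorFlow's layouts: the input is NHWC and a depthwise filter is [filter_rows, filter_cols, in_depth, depth_multiplier]. A minimal sketch of reading those dimensions into a plain struct (names are illustrative, not the kernel's own helper):

    #include "tensorflow/core/framework/tensor.h"

    namespace tensorflow {

    struct DepthwiseDims {
      int32 batch, input_rows, input_cols, in_depth;
      int32 filter_rows, filter_cols, depth_multiplier, out_depth;
    };

    // Assumes input is NHWC and filter is [rows, cols, in_depth, multiplier];
    // a real kernel checks filter.dim_size(2) == in_depth with OP_REQUIRES
    // before trusting these values.
    inline DepthwiseDims ExtractDepthwiseDims(const Tensor& input,
                                              const Tensor& filter) {
      DepthwiseDims d;
      d.batch = static_cast<int32>(input.dim_size(0));
      d.input_rows = static_cast<int32>(input.dim_size(1));
      d.input_cols = static_cast<int32>(input.dim_size(2));
      d.in_depth = static_cast<int32>(input.dim_size(3));
      d.filter_rows = static_cast<int32>(filter.dim_size(0));
      d.filter_cols = static_cast<int32>(filter.dim_size(1));
      d.depth_multiplier = static_cast<int32>(filter.dim_size(3));
      d.out_depth = d.in_depth * d.depth_multiplier;
      return d;
    }

    }  // namespace tensorflow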
|
/external/tensorflow/tensorflow/core/kernels/ |
D | roll_op.cc |
    40   const int num_dims, const gtl::ArraySlice<int>& dim_size,  in DoRoll() argument
    43   auto work = [input, output, num_dims, &dim_size, &threshold, &dim_range](  in DoRoll()
    53   const int64 stride = dim_range[i] / dim_size[i];  in DoRoll()
    54   const int shift = dim_size[i] - threshold[i];  in DoRoll()
    55   const int indx = (start / stride) % dim_size[i];  in DoRoll()
    58   const int shifted_indx = (indx + shift) % dim_size[i];  in DoRoll()
    67   const int indx = (indices[j] + 1) % dim_size[j];  in DoRoll()
    103  const int num_dims, const gtl::ArraySlice<int>& dim_size,  in DoRollWithMemcpy() argument
    108  auto work = [input, output, num_dims, &dim_size, &threshold, &dim_range, isd](  in DoRollWithMemcpy()
    114  const int64 isd_stride = isd_range / std::max<int>(dim_size[isd], 1);  in DoRollWithMemcpy()
    [all …]
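In this file dim_size is a per-dimension extent array rather than the Tensor accessor, and dim_range[i] is the number of elements spanned by dimensions i..last, so the row-major stride of dimension i falls out as dim_range[i] / dim_size[i]. A standalone restatement of that bookkeeping (pure C++, mirroring the naming convention but not the file's code):

    #include <cstdint>
    #include <vector>

    // dim_range[i] = dim_size[i] * dim_size[i+1] * ... * dim_size[last].
    std::vector<int64_t> ComputeDimRange(const std::vector<int>& dim_size) {
      const int num_dims = static_cast<int>(dim_size.size());
      std::vector<int64_t> dim_range(num_dims);
      int64_t range = 1;
      for (int i = num_dims - 1; i >= 0; --i) {
        range *= dim_size[i];
        dim_range[i] = range;
      }
      return dim_range;
    }

    // Example: dim_size = {2, 3, 4} gives dim_range = {24, 12, 4}, so the
    // strides 24/2 = 12, 12/3 = 4, 4/4 = 1 are the usual row-major strides.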
|
D | image_resizer_state.h |
    95   batch_size = input.dim_size(0);  in ValidateAndCalculateOutputSize()
    100  FastBoundsCheck(input.dim_size(1), std::numeric_limits<int32>::max()) &&  in ValidateAndCalculateOutputSize()
    101  FastBoundsCheck(input.dim_size(2),  in ValidateAndCalculateOutputSize()
    105  in_height = static_cast<int32>(input.dim_size(1));  in ValidateAndCalculateOutputSize()
    106  in_width = static_cast<int32>(input.dim_size(2));  in ValidateAndCalculateOutputSize()
    107  channels = input.dim_size(3);  in ValidateAndCalculateOutputSize()
    114  context, input.dim_size(1) > 0 && input.dim_size(2) > 0,  in ValidateAndCalculateOutputSize()
    138  TensorShape({input.dim_size(0), out_height,  in ValidateAndCreateOutput()
    139  out_width, input.dim_size(3)}),  in ValidateAndCreateOutput()
    186  batch_size = input.dim_size(0);  in ValidateAndCreateOutput()
    [all …]
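Because Tensor::dim_size() returns int64, code that stores sizes as int32 first proves the value fits, then narrows. A minimal sketch of that pattern, assuming FastBoundsCheck from bounds_check.h in this tree (the helper name is hypothetical):

    #include <limits>
    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/kernels/bounds_check.h"
    #include "tensorflow/core/lib/core/errors.h"

    namespace tensorflow {

    // Fails the op if the NHWC height/width do not fit in int32, otherwise
    // narrows them. Mirrors the ValidateAndCalculateOutputSize pattern above.
    void CheckedHeightWidth(OpKernelContext* context, const Tensor& input,
                            int32* in_height, int32* in_width) {
      OP_REQUIRES(context,
                  FastBoundsCheck(input.dim_size(1),
                                  std::numeric_limits<int32>::max()) &&
                      FastBoundsCheck(input.dim_size(2),
                                      std::numeric_limits<int32>::max()),
                  errors::InvalidArgument("input sizes must fit in int32"));
      *in_height = static_cast<int32>(input.dim_size(1));
      *in_width = static_cast<int32>(input.dim_size(2));
    }

    }  // namespace tensorflow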
|
D | sparse_slice_grad_op.cc |
    51   input_indices->dim_size(1) == output_indices->dim_size(1),  in Compute()
    53   "ndims: got: ", input_indices->dim_size(1), " and ",  in Compute()
    54   output_indices->dim_size(1)));  in Compute()
    56   ctx, output_indices->dim_size(0) <= input_indices->dim_size(0),  in Compute()
    59   output_indices->dim_size(0), " and ",  in Compute()
    60   input_indices->dim_size(0)));  in Compute()
    62   ctx, backprop_val_grad->NumElements() == output_indices->dim_size(0),  in Compute()
    66   output_indices->dim_size(0)));  in Compute()
    72   const int num_dims = input_indices->dim_size(1);  in Compute()
    78   const int64 input_nnz = input_indices->dim_size(0);  in Compute()
|
D | lrn_op.cc |
    82   const int batch = static_cast<int>(in.dim_size(0));  in launch()
    83   const int rows = static_cast<int>(in.dim_size(1));  in launch()
    84   const int cols = static_cast<int>(in.dim_size(2));  in launch()
    85   const int depth = static_cast<int>(in.dim_size(3));  in launch()
    189  const int batch = static_cast<int>(in.dim_size(0));  in launch()
    190  const int rows = static_cast<int>(in.dim_size(1));  in launch()
    191  const int cols = static_cast<int>(in.dim_size(2));  in launch()
    192  const int depth = static_cast<int>(in.dim_size(3));  in launch()
    260  const int batch = static_cast<int>(in.dim_size(0));  in Compute()
    261  const int rows = static_cast<int>(in.dim_size(1));  in Compute()
    [all …]
|
D | sparse_add_grad_op.cc |
    54   a_indices->dim_size(1) == b_indices->dim_size(1) &&  in Compute()
    55   b_indices->dim_size(1) == sum_indices->dim_size(1),  in Compute()
    58   a_indices->dim_size(1), b_indices->dim_size(1),  in Compute()
    59   sum_indices->dim_size(1)));  in Compute()
    61   ctx, backprop_val_grad->NumElements() == sum_indices->dim_size(0),  in Compute()
    65   sum_indices->dim_size(0)));  in Compute()
    67   const int num_dims = a_indices->dim_size(1);  in Compute()
    68   const int64 a_nnz = a_indices->dim_size(0);  in Compute()
    69   const int64 b_nnz = b_indices->dim_size(0);  in Compute()
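Both sparse gradient ops above rely on the same COO convention: an indices tensor is an [nnz, ndims] int64 matrix, so dim_size(0) is the number of nonzeros and dim_size(1) is the rank of the dense shape. A tiny sketch of that convention (hypothetical helper, not part of either op):

    #include "tensorflow/core/framework/tensor.h"

    namespace tensorflow {

    struct SparseCooDims {
      int64 nnz;      // indices.dim_size(0): one row per nonzero element
      int num_dims;   // indices.dim_size(1): rank of the dense shape
    };

    inline SparseCooDims GetCooDims(const Tensor& indices) {
      SparseCooDims d;
      d.nnz = indices.dim_size(0);
      d.num_dims = static_cast<int>(indices.dim_size(1));
      return d;
    }

    }  // namespace tensorflow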
|
D | sparse_conditional_accumulator.h |
    102  if (shape_.dim_size(i) != -1 &&  in ValidateShape()
    103  shape_.dim_size(i) != tensor_shape_flat(i)) {  in ValidateShape()
    105  i, " to be ", shape_.dim_size(i),  in ValidateShape()
    111  if (shape_.dims() > 0 && shape_.dim_size(0) != -1 &&  in ValidateShape()
    113  for (int64 i = 0; i < tensor_idx->dim_size(0); i++) {  in ValidateShape()
    114  if (tensor_idx->vec<int64>()(i) >= shape_.dim_size(0)) {  in ValidateShape()
    118  shape_.dim_size(0));  in ValidateShape()
    131  if (accum_val_->dim_size(i) != tensor_val->dim_size(i)) {  in ValidateShape()
    133  i, " to be ", accum_val_->dim_size(i),  in ValidateShape()
    134  ", got ", tensor_val->dim_size(i));  in ValidateShape()
    [all …]
|
D | searchsorted_op.cc |
    90   OP_REQUIRES(ctx, sorted_inputs_t.dim_size(0) == values_t.dim_size(0),  in Compute()
    104  FastBoundsCheck(sorted_inputs_t.dim_size(1),  in Compute()
    108  sorted_inputs_t.dim_size(1)));  in Compute()
    116  ctx, sorted_inputs, values, sorted_inputs_t.dim_size(0),  in Compute()
    117  sorted_inputs_t.dim_size(1), values_t.dim_size(1), &output));  in Compute()
    131  OP_REQUIRES(ctx, sorted_inputs_t.dim_size(0) == values_t.dim_size(0),  in Compute()
    145  FastBoundsCheck(sorted_inputs_t.dim_size(1),  in Compute()
    149  sorted_inputs_t.dim_size(1)));  in Compute()
    157  ctx, sorted_inputs, values, sorted_inputs_t.dim_size(0),  in Compute()
    158  sorted_inputs_t.dim_size(1), values_t.dim_size(1), &output));  in Compute()
|
D | conv_grad_ops.cc |
    63   dim->input_size = input_shape.dim_size(spatial_dim);  in ConvBackpropExtractAndVerifyDimension()
    64   dim->filter_size = filter_shape.dim_size(filter_spatial_dim);  in ConvBackpropExtractAndVerifyDimension()
    65   dim->output_size = output_shape.dim_size(spatial_dim);  in ConvBackpropExtractAndVerifyDimension()
    119  dims->batch_size = input_shape.dim_size(batch_dim);  in ConvBackpropComputeDimensionsV2()
    120  if (dims->batch_size != out_backprop_shape.dim_size(batch_dim)) {  in ConvBackpropComputeDimensionsV2()
    124  "outbackprop batch: ", out_backprop_shape.dim_size(batch_dim),  in ConvBackpropComputeDimensionsV2()
    129  dims->in_depth = input_shape.dim_size(feature_dim);  in ConvBackpropComputeDimensionsV2()
    133  << filter_shape.dim_size(num_dims - 2);  in ConvBackpropComputeDimensionsV2()
    134  if (dims->in_depth % filter_shape.dim_size(num_dims - 2)) {  in ConvBackpropComputeDimensionsV2()
    138  dims->out_depth = filter_shape.dim_size(num_dims - 1);  in ConvBackpropComputeDimensionsV2()
    [all …]
|
D | mkl_transpose_op.cc |
    63   mkl_##PREFIX##omatcopy('R', trans, in.dim_size(0), in.dim_size(1), 1, \
    64   in.flat<T>().data(), in.dim_size(1), \
    65   out->flat<T>().data(), in.dim_size(0)); \
    79   'R', trans, in.dim_size(0), in.dim_size(1), alpha,  in INSTANTIATE()
    81   in.dim_size(1),  in INSTANTIATE()
    84   in.dim_size(0));  in INSTANTIATE()
    93   'R', trans, in.dim_size(0), in.dim_size(1), alpha,  in MKLTranspose2D()
    95   in.dim_size(1),  in MKLTranspose2D()
    98   in.dim_size(0));  in MKLTranspose2D()
|
D | batch_matmul_op_impl.h |
    162  t.flat<Scalar>().data() + slice * t.dim_size(1) * t.dim_size(2),
    163  t.dim_size(1), t.dim_size(2));
    168  t->flat<Scalar>().data() + slice * t->dim_size(1) * t->dim_size(2),
    169  t->dim_size(1), t->dim_size(2));
    210  const int64 batch_size = in_x.dim_size(0);
    212  in_x.dim_size(1) * in_x.dim_size(2) * out->dim_size(2);
    214  std::min(in_x.dim_size(1), in_x.dim_size(2)), out->dim_size(2));
    299  const uint64 m = in_x.dim_size(adj_x ? 2 : 1);
    300  const uint64 k = in_x.dim_size(adj_x ? 1 : 2);
    301  const uint64 n = in_y.dim_size(adj_y ? 1 : 2);
    [all …]
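Lines 299-301 show how the contraction sizes of a batched matmul depend on the adjoint flags: x is [batch, m, k] normally and [batch, k, m] when adj_x, while y is [batch, k, n] normally and [batch, n, k] when adj_y. A standalone restatement in plain C++ (not the templated launcher from the header):

    #include <cstdint>

    struct MatmulSizes { uint64_t m, k, n; };

    // x_dims and y_dims are the three extents [batch, rows, cols] of the
    // operands; the batch extent (index 0) does not enter the contraction.
    inline MatmulSizes ContractionSizes(const int64_t x_dims[3],
                                        const int64_t y_dims[3],
                                        bool adj_x, bool adj_y) {
      MatmulSizes s;
      s.m = static_cast<uint64_t>(x_dims[adj_x ? 2 : 1]);
      s.k = static_cast<uint64_t>(x_dims[adj_x ? 1 : 2]);
      s.n = static_cast<uint64_t>(y_dims[adj_y ? 1 : 2]);
      return s;
    }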
|
D | conv_ops_fused_image_transform.cc |
    659  st.batch_size = input.dim_size(0);  in Compute()
    660  st.out_height = input.dim_size(1);  in Compute()
    661  st.out_width = input.dim_size(2);  in Compute()
    662  st.in_height = input.dim_size(1);  in Compute()
    663  st.in_width = input.dim_size(2);  in Compute()
    664  st.channels = input.dim_size(3);  in Compute()
    669  {input.dim_size(0), st.out_height, st.out_width, input.dim_size(3)});  in Compute()
    685  paddings.dim_size(1) == 2,  in Compute()
    689  (allow_legacy_scalars() && dims == 0 && paddings.dim_size(0) == 1)  in Compute()
    693  context, fixed_dims == paddings.dim_size(0),  in Compute()
    [all …]
|
D | linalg_ops_common.cc |
    66   input_matrix_shapes[0].dim_size(0) == input_matrix_shapes[1].dim_size(0),  in ValidateSolver()
    84   input_matrix_shapes[0].dim_size(0) == input_matrix_shapes[1].dim_size(0),  in ValidateSquareSolver()
    131  batch_shape->AddDim(in.dim_size(dim));  in AnalyzeInputs()
    140  context, in.dim_size(dim) == batch_shape->dim_size(dim),  in AnalyzeInputs()
    148  const int64 num_rows = in.dim_size(row_dimension);  in AnalyzeInputs()
    149  const int64 num_cols = in.dim_size(col_dimension);  in AnalyzeInputs()
    229  input_matrix_shapes[i].dim_size(0), input_matrix_shapes[i].dim_size(1));  in ComputeTensorSlice()
    236  ? output_matrix_shapes[i].dim_size(0)  in ComputeTensorSlice()
    239  ? output_matrix_shapes[i].dim_size(1)  in ComputeTensorSlice()
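The AnalyzeInputs references show the convention shared by the linear-algebra ops: all leading dimensions form the batch shape and the trailing two are the matrix (rows, cols). A minimal sketch of that split, assuming a rank >= 2 input (hypothetical helper, not the LinearAlgebraOp member):

    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/tensor_shape.h"

    namespace tensorflow {

    inline void SplitBatchAndMatrix(const Tensor& in, TensorShape* batch_shape,
                                    int64* num_rows, int64* num_cols) {
      const int ndims = in.dims();  // assumed to be at least 2
      batch_shape->Clear();
      for (int dim = 0; dim < ndims - 2; ++dim) {
        batch_shape->AddDim(in.dim_size(dim));  // leading dims = batch shape
      }
      *num_rows = in.dim_size(ndims - 2);
      *num_cols = in.dim_size(ndims - 1);
    }

    }  // namespace tensorflow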
|
D | summary_image_op.cc |
    59   (tensor.dim_size(3) == 1 || tensor.dim_size(3) == 3 ||  in Compute()
    60   tensor.dim_size(3) == 4),  in Compute()
    67   tensor.dim_size(0) < (1LL << 31) &&  in Compute()
    68   tensor.dim_size(1) < (1LL << 31) &&  in Compute()
    69   tensor.dim_size(2) < (1LL << 31) &&  in Compute()
    70   (tensor.dim_size(1) * tensor.dim_size(2)) < (1LL << 29),  in Compute()
    75   const int batch_size = static_cast<int>(tensor.dim_size(0));  in Compute()
    76   const int h = static_cast<int>(tensor.dim_size(1));  in Compute()
    77   const int w = static_cast<int>(tensor.dim_size(2));  in Compute()
    79   const int depth = static_cast<int>(tensor.dim_size(3));  in Compute()
    [all …]
|
D | mkl_conv_ops.h |
    200  FastBoundsCheck(filter_shape.dim_size(i),  in GetFilterSizeInMklOrder()
    208  OP_REQUIRES(context_, input_depth == filter_shape.dim_size(2),  in GetFilterSizeInMklOrder()
    211  input_depth, " vs ", filter_shape.dim_size(2)));  in GetFilterSizeInMklOrder()
    215  static_cast<int>(filter_shape.dim_size(TF_2DFILTER_DIM_H));  in GetFilterSizeInMklOrder()
    217  static_cast<int>(filter_shape.dim_size(TF_2DFILTER_DIM_W));  in GetFilterSizeInMklOrder()
    219  static_cast<int>(filter_shape.dim_size(TF_2DFILTER_DIM_I));  in GetFilterSizeInMklOrder()
    221  static_cast<int>(filter_shape.dim_size(TF_2DFILTER_DIM_O));  in GetFilterSizeInMklOrder()
    246  OP_REQUIRES(context_, input_depth == filter_shape.dim_size(3),  in GetFilterSizeInMklOrder()
    249  input_depth, " vs ", filter_shape.dim_size(3)));  in GetFilterSizeInMklOrder()
    253  static_cast<int>(filter_shape.dim_size(TF_3DFILTER_DIM_P));  in GetFilterSizeInMklOrder()
    [all …]
|
/external/tensorflow/tensorflow/contrib/coder/kernels/ |
D | range_coder_ops_util.cc |
    48   (broadcast_shape.dim_size(j) != storage_shape.dim_size(j)) &&  in MergeAxes()
    49   (storage_shape.dim_size(j) != 1))) {  in MergeAxes()
    56   const bool is_broadcasting = (storage_shape.dim_size(j) == 1);  in MergeAxes()
    63   (broadcast_shape.dim_size(j) <= 1) ||  in MergeAxes()
    67   merged_broadcast_shape[i] *= broadcast_shape.dim_size(j);  in MergeAxes()
    68   merged_storage_shape[i] *= storage_shape.dim_size(j);  in MergeAxes()
    71   merged_broadcast_shape.push_back(broadcast_shape.dim_size(j));  in MergeAxes()
    72   merged_storage_shape.push_back(storage_shape.dim_size(j));  in MergeAxes()
    79   storage_stride *= storage_shape.dim_size(i);  in MergeAxes()
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | shape_util.cc |
    30   int64 dim_size = input_shape.dim_size(i);  in TensorShapeToConstant() local
    31   if (!FastBoundsCheck(dim_size, std::numeric_limits<int32>::max())) {  in TensorShapeToConstant()
    34   " but dim ", i, " is ", dim_size);  in TensorShapeToConstant()
    36   vec(i) = static_cast<int32>(dim_size);  in TensorShapeToConstant()
    41   int64 dim_size = input_shape.dim_size(i);  in TensorShapeToConstant() local
    42   vec(i) = dim_size;  in TensorShapeToConstant()
|
D | lrn_ops.cc |
    96   const int64 batch = in_grads_shape.dim_size(0);  in Compile()
    97   const int64 rows = in_grads_shape.dim_size(1);  in Compile()
    98   const int64 cols = in_grads_shape.dim_size(2);  in Compile()
    99   const int64 depth = in_grads_shape.dim_size(3);  in Compile()
    101  ctx, in_image_shape.dim_size(0) == batch &&  in Compile()
    102  in_image_shape.dim_size(1) == rows &&  in Compile()
    103  in_image_shape.dim_size(2) == cols &&  in Compile()
    104  in_image_shape.dim_size(3) == depth &&  in Compile()
    105  out_image_shape.dim_size(0) == batch &&  in Compile()
    106  out_image_shape.dim_size(1) == rows &&  in Compile()
    [all …]
|
/external/tensorflow/tensorflow/core/summary/ |
D | summary_converter.cc |
    184  if (bad_color_tensor.dim_size(0) < depth) {  in NormalizeAndAddImages()
    187  ", bad_color.size = ", bad_color_tensor.dim_size(0));  in NormalizeAndAddImages()
    240  (tensor.dim_size(3) == 1 || tensor.dim_size(3) == 3 ||  in AddTensorAsImageToSummary()
    241  tensor.dim_size(3) == 4))) {  in AddTensorAsImageToSummary()
    246  if (!(tensor.dim_size(0) < (1LL << 31) && tensor.dim_size(1) < (1LL << 31) &&  in AddTensorAsImageToSummary()
    247  tensor.dim_size(2) < (1LL << 31) &&  in AddTensorAsImageToSummary()
    248  (tensor.dim_size(1) * tensor.dim_size(2)) < (1LL << 29))) {  in AddTensorAsImageToSummary()
    253  const int batch_size = static_cast<int>(tensor.dim_size(0));  in AddTensorAsImageToSummary()
    254  const int h = static_cast<int>(tensor.dim_size(1));  in AddTensorAsImageToSummary()
    255  const int w = static_cast<int>(tensor.dim_size(2));  in AddTensorAsImageToSummary()
    [all …]
|
/external/tensorflow/tensorflow/contrib/tensor_forest/kernels/ |
D | scatter_add_ndim_op.cc |
    38   if (indices_tensor.shape().dim_size(0) > 0) {  in Compute()
    44   indices_tensor.shape().dim_size(1) + delta_dims ==  in Compute()
    51   indices_tensor.shape().dim_size(0) ==  in Compute()
    52   deltas_tensor.shape().dim_size(0),  in Compute()
    70   static_cast<int32>(indices_tensor.shape().dim_size(1));  in Compute()
    76   num_data_per_index *= input_tensor.shape().dim_size(num_dims + i);  in Compute()
    87   const int32 m = last_size / input_tensor.shape().dim_size(j);  in Compute()
    93   for (int32 i = 0; i < indices_tensor.shape().dim_size(0); i++) {  in Compute()
|
/external/tensorflow/tensorflow/contrib/factorization/kernels/ |
D | wals_solver_ops.cc |
    95   context, input_indices.dim_size(1) == 2,  in Compute()
    101  OP_REQUIRES(context, input_indices.dim_size(0) == input_values.dim_size(0),  in Compute()
    111  ((input_weights.dim_size(0) > 0 &&  in Compute()
    112  factor_weights.dim_size(0) == factors.dim_size(0) &&  in Compute()
    113  entry_weights.dim_size(0) == 0) ||  in Compute()
    114  (input_weights.dim_size(0) == 0 && factor_weights.dim_size(0) == 0 &&  in Compute()
    115  entry_weights.dim_size(0) == input_indices.dim_size(0))),  in Compute()
    122  const int64 factor_dim = factors.dim_size(1);  in Compute()
    123  const int64 factors_size = factors.dim_size(0);  in Compute()
    124  const int64 num_nonzero_elements = input_indices.dim_size(0);  in Compute()
|
/external/tensorflow/tensorflow/core/grappler/utils/ |
D | symbolic_shapes.cc |
    25   dims.reserve(shape.dim_size());  in ShapeDims()
    26   for (int i = 0; i < shape.dim_size(); ++i)  in ShapeDims()
    56   return shape.dim_size();  in Rank()
    76   left.dim_size() != right.dim_size()) {  in ShapesSymbolicallyEqual()
    79   for (int i = 0; i < left.dim_size(); ++i) {  in ShapesSymbolicallyEqual()
    153  for (int i = 0; i < shape.dim_size(); ++i) {  in CompareSymbolicallyShapedTensorSizes()
    155  int64 dim_size = dim.size();  in CompareSymbolicallyShapedTensorSizes() local
    156  if (dim_size > 0) {  in CompareSymbolicallyShapedTensorSizes()
    157  *defined_size *= dim_size;  in CompareSymbolicallyShapedTensorSizes()
    161  ++(*unknown_dims)[dim_size];  in CompareSymbolicallyShapedTensorSizes()
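Note that in this file the argument is a TensorShapeProto, so shape.dim_size() is the protobuf repeated-field count (the rank), while each extent is read with shape.dim(i).size(); it is not the per-dimension Tensor::dim_size(i) accessor seen in the kernel entries above. A small sketch of iterating a shape proto in that style (the helper name is mine):

    #include "tensorflow/core/framework/tensor_shape.pb.h"
    #include "tensorflow/core/platform/types.h"

    namespace tensorflow {

    // shape.dim_size() == number of dimensions; shape.dim(i).size() == extent
    // of dimension i, with non-positive values treated as unknown, as in
    // grappler's symbolic shape comparisons.
    inline int64 ProductOfDefinedDims(const TensorShapeProto& shape) {
      int64 defined_size = 1;
      for (int i = 0; i < shape.dim_size(); ++i) {
        const int64 dim_size = shape.dim(i).size();
        if (dim_size > 0) defined_size *= dim_size;
      }
      return defined_size;
    }

    }  // namespace tensorflow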
|