/external/eigen/unsupported/Eigen/CXX11/src/Tensor/ |
D | TensorFixedSize.h |
   56  …NLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }  in dimension()
   57  … EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.di…  in dimensions() function
  144  const Index index = i1 + i0 * m_storage.dimensions()[1];  in operator()
  147  const Index index = i0 + i1 * m_storage.dimensions()[0];  in operator()
  155  const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);  in operator()
  158  const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);  in operator()
  166  …const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_sto…  in operator()
  169  …const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_sto…  in operator()
  177  …ex = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions…  in operator()
  180  …ex = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions…  in operator()
  [all …]
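The coordinate-to-index arithmetic matched above (lines 144-180) is plain linearization in the two storage orders. As a rough illustration, not Eigen's own code, the rank-3 case looks like the following, with d0..d2 standing in for the tensor's dimensions:

    // Rough sketch (not Eigen source): linearizing a rank-3 coordinate
    // (i0, i1, i2) for dimensions (d0, d1, d2) in both storage orders,
    // mirroring the formulas matched at lines 155 and 158 above.
    #include <cstdio>

    int main() {
      const int d0 = 2, d1 = 3, d2 = 4;  // tensor dimensions
      const int i0 = 1, i1 = 0, i2 = 2;  // a sample coordinate
      // first index fastest (column-major): i0 + d0*(i1 + d1*i2)
      const int col_major = i0 + d0 * (i1 + d1 * i2);  // 1 + 2*(0 + 3*2) = 13
      // last index fastest (row-major): i2 + d2*(i1 + d1*i0)
      const int row_major = i2 + d2 * (i1 + d1 * i0);  // 2 + 4*(0 + 3*1) = 14
      std::printf("col-major %d, row-major %d\n", col_major, row_major);
      return 0;
    }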
|
D | TensorBroadcasting.h |
  122  const InputDimensions& input_dims = m_impl.dimensions();
  146  …EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
  177  eigen_assert(idx < m_impl.dimensions()[i]);
  181  eigen_assert(idx % m_impl.dimensions()[i] == 0);
  183  inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i];
  189  eigen_assert(index < m_impl.dimensions()[0]);
  193  eigen_assert(index % m_impl.dimensions()[0] == 0);
  195  inputIndex += (index % m_impl.dimensions()[0]);
  207  eigen_assert(idx < m_impl.dimensions()[i]);
  211  eigen_assert(idx % m_impl.dimensions()[i] == 0);
  [all …]
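The modulo pattern in the matched lines (inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]) is how a broadcast output coordinate is folded back onto the smaller input. A minimal stand-alone sketch of that mapping, with made-up 1x3 -> 2x3 shapes rather than anything from the file:

    // Illustrative only: mapping output coordinates of a broadcast back to
    // input coordinates by taking each coordinate modulo the input extent.
    #include <cstdio>

    int main() {
      const int input_dims[2]  = {1, 3};  // input shape
      const int output_dims[2] = {2, 3};  // shape after broadcasting dim 0
      for (int i = 0; i < output_dims[0]; ++i) {
        for (int j = 0; j < output_dims[1]; ++j) {
          // A broadcast (tiled) dimension wraps around the input extent.
          std::printf("out(%d,%d) reads in(%d,%d)\n",
                      i, j, i % input_dims[0], j % input_dims[1]);
        }
      }
      return 0;
    }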
|
D | Tensor.h |
  101  … Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }  in dimension()
  102  …N_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.di…  in dimensions() function
  376  … EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(const array<Index, NumIndices>& dimensions)  in Tensor() argument
  377  : m_storage(internal::array_prod(dimensions), dimensions)  in Tensor()
  388  resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());  in Tensor()
  397  resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());  in Tensor()
  406  resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
  416  resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
  432  EIGEN_DEVICE_FUNC void resize(const array<Index, NumIndices>& dimensions)  in resize() argument
  437  internal::check_rows_cols_for_overflow<Dynamic>::run(size, dimensions[i]);  in resize()
  [all …]
|
/external/eigen/unsupported/test/ |
D | cxx11_tensor_dimension.cpp |
  19  Eigen::DSizes<int, 3> dimensions(2,3,7);  in test_dynamic_size() local
  21  VERIFY_IS_EQUAL((int)Eigen::internal::array_get<0>(dimensions), 2);  in test_dynamic_size()
  22  VERIFY_IS_EQUAL((int)Eigen::internal::array_get<1>(dimensions), 3);  in test_dynamic_size()
  23  VERIFY_IS_EQUAL((int)Eigen::internal::array_get<2>(dimensions), 7);  in test_dynamic_size()
  24  VERIFY_IS_EQUAL((int)dimensions.TotalSize(), 2*3*7);  in test_dynamic_size()
  25  VERIFY_IS_EQUAL((int)dimensions[0], 2);  in test_dynamic_size()
  26  VERIFY_IS_EQUAL((int)dimensions[1], 3);  in test_dynamic_size()
  27  VERIFY_IS_EQUAL((int)dimensions[2], 7);  in test_dynamic_size()
  32  Eigen::Sizes<2,3,7> dimensions;  in test_fixed_size() local
  34  VERIFY_IS_EQUAL((int)Eigen::internal::array_get<0>(dimensions), 2);  in test_fixed_size()
  [all …]
|
D | cxx11_tensor_sycl.cpp |
  44  …float * gpu_in1_data = static_cast<float*>(sycl_device.allocate(in1.dimensions().TotalSize()*size…  in test_sycl_cpu()
  45  …float * gpu_in2_data = static_cast<float*>(sycl_device.allocate(in2.dimensions().TotalSize()*size…  in test_sycl_cpu()
  46  …float * gpu_in3_data = static_cast<float*>(sycl_device.allocate(in3.dimensions().TotalSize()*size…  in test_sycl_cpu()
  47  …float * gpu_out_data = static_cast<float*>(sycl_device.allocate(out.dimensions().TotalSize()*size…  in test_sycl_cpu()
  56  …sycl_device.memcpyDeviceToHost(in1.data(), gpu_in1_data ,(in1.dimensions().TotalSize())*sizeof(flo…  in test_sycl_cpu()
  68  …sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data ,(out.dimensions().TotalSize())*sizeof(flo…  in test_sycl_cpu()
  80  …sycl_device.memcpyHostToDevice(gpu_in2_data, in2.data(),(in2.dimensions().TotalSize())*sizeof(floa…  in test_sycl_cpu()
  82  …sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(floa…  in test_sycl_cpu()
  96  …sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(floa…  in test_sycl_cpu()
  110  …sycl_device.memcpyDeviceToHost(out.data(), gpu_out_data,(out.dimensions().TotalSize())*sizeof(floa…  in test_sycl_cpu()
  [all …]
|
D | cxx11_tensor_reduction_sycl.cpp |
  39  …float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(f…  in test_full_reductions_sycl()
  45  …sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));  in test_full_reductions_sycl()
  74  …float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(f…  in test_first_dim_reductions_sycl()
  75  …float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*…  in test_first_dim_reductions_sycl()
  80  …sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));  in test_first_dim_reductions_sycl()
  82  …sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*…  in test_first_dim_reductions_sycl()
  112  …float* gpu_in_data = static_cast<float*>(sycl_device.allocate(in.dimensions().TotalSize()*sizeof(f…  in test_last_dim_reductions_sycl()
  113  …float* gpu_out_data = static_cast<float*>(sycl_device.allocate(redux_gpu.dimensions().TotalSize()*…  in test_last_dim_reductions_sycl()
  118  …sycl_device.memcpyHostToDevice(gpu_in_data, in.data(),(in.dimensions().TotalSize())*sizeof(float));  in test_last_dim_reductions_sycl()
  120  …sycl_device.memcpyDeviceToHost(redux_gpu.data(), gpu_out_data, redux_gpu.dimensions().TotalSize()*…  in test_last_dim_reductions_sycl()
|
D | cxx11_tensor_simple.cpp |
  66  VERIFY_IS_EQUAL((vec1.dimensions()[0]), 6);  in test_1d()
  134  VERIFY_IS_EQUAL((mat1.dimensions()[0]), 2);  in test_2d()
  135  VERIFY_IS_EQUAL((mat1.dimensions()[1]), 3);  in test_2d()
  139  VERIFY_IS_EQUAL((mat2.dimensions()[0]), 2);  in test_2d()
  140  VERIFY_IS_EQUAL((mat2.dimensions()[1]), 3);  in test_2d()
  165  VERIFY_IS_EQUAL((epsilon.dimensions()[0]), 3);  in test_3d()
  166  VERIFY_IS_EQUAL((epsilon.dimensions()[1]), 3);  in test_3d()
  167  VERIFY_IS_EQUAL((epsilon.dimensions()[2]), 3);  in test_3d()
  206  VERIFY_IS_EQUAL((t1.dimensions()[0]), 2);  in test_3d()
  207  VERIFY_IS_EQUAL((t1.dimensions()[1]), 3);  in test_3d()
  [all …]
|
/external/smali/dexlib2/src/main/java/org/jf/dexlib2/analysis/ |
D | ArrayProto.java |
  47  protected final int dimensions;  field in ArrayProto
  64  dimensions = i;  in ArrayProto()
  70  @Nonnull @Override public String getType() { return makeArrayType(elementType, dimensions); }  in getType()
  71  public int getDimensions() { return dimensions; }  in getDimensions()
  84  if (dimensions > 1) {  in getImmediateElementType()
  85  return makeArrayType(elementType, dimensions-1);  in getImmediateElementType()
  104  if (dimensions == ((ArrayProto)other).dimensions &&  in getCommonSuperclass()
  111  if (dimensions == ((ArrayProto)other).dimensions) {  in getCommonSuperclass()
  121  return classPath.getClass(makeArrayType(mergedClass.getType(), dimensions));  in getCommonSuperclass()
  124  int dimensions = Math.min(this.dimensions, ((ArrayProto)other).dimensions);  in getCommonSuperclass() local
  [all …]
|
/external/mesa3d/src/mesa/main/ |
D | pbo.c |
  67  _mesa_validate_pbo_access(GLuint dimensions,  in _mesa_validate_pbo_access() argument
  113  start = _mesa_image_offset(dimensions, pack, width, height,  in _mesa_validate_pbo_access()
  117  end = _mesa_image_offset(dimensions, pack, width, height,  in _mesa_validate_pbo_access()
  179  _mesa_validate_pbo_source(struct gl_context *ctx, GLuint dimensions,  in _mesa_validate_pbo_source() argument
  186  assert(dimensions == 1 || dimensions == 2 || dimensions == 3);  in _mesa_validate_pbo_source()
  188  if (!_mesa_validate_pbo_access(dimensions, unpack, width, height, depth,  in _mesa_validate_pbo_source()
  222  _mesa_validate_pbo_source_compressed(struct gl_context *ctx, GLuint dimensions,  in _mesa_validate_pbo_source_compressed() argument
  260  GLuint dimensions,  in _mesa_map_validate_pbo_source() argument
  267  if (!_mesa_validate_pbo_source(ctx, dimensions, unpack,  in _mesa_map_validate_pbo_source()
  338  GLuint dimensions,  in _mesa_map_validate_pbo_dest() argument
  [all …]
|
D | pbo.h |
  35  _mesa_validate_pbo_access(GLuint dimensions,
  48  GLuint dimensions,
  65  GLuint dimensions,
  77  _mesa_validate_pbo_teximage(struct gl_context *ctx, GLuint dimensions,
  85  GLuint dimensions, GLsizei imageSize,
  96  _mesa_validate_pbo_source(struct gl_context *ctx, GLuint dimensions,
  104  _mesa_validate_pbo_source_compressed(struct gl_context *ctx, GLuint dimensions,
|
/external/pdfium/fxbarcode/pdf417/ |
D | BC_PDF417.cpp |
  416  std::vector<int32_t> dimensions =  in generateBarcodeLogic() local
  418  if (dimensions.size() != 2)  in generateBarcodeLogic()
  420  int32_t cols = dimensions[0];  in generateBarcodeLogic()
  421  int32_t rows = dimensions[1];  in generateBarcodeLogic()
  543  std::vector<int32_t> dimensions;  in determineDimensions() local
  554  if (!dimensions.empty() &&  in determineDimensions()
  559  dimensions.resize(2);  in determineDimensions()
  560  dimensions[0] = cols;  in determineDimensions()
  561  dimensions[1] = rows;  in determineDimensions()
  563  if (dimensions.empty()) {  in determineDimensions()
  [all …]
|
/external/tensorflow/tensorflow/compiler/tf2xla/lib/ |
D | batch_dot.cc |
  55  if (x_shape->dimensions(i) != y_shape->dimensions(i)) {  in BatchDot()
  66  if (x_shape->dimensions(x_inner_dim) != y_shape->dimensions(y_inner_dim)) {  in BatchDot()
  78  std::vector<int64> dimensions(batch_dimension_numbers.size());  in BatchDot() local
  80  dimensions[i] = x_shape->dimensions(batch_dimension_numbers[i]);  in BatchDot()
  84  dimensions.push_back(x_shape->dimensions(x_outer_dim));  in BatchDot()
  85  dimensions.push_back(y_shape->dimensions(y_outer_dim));  in BatchDot()
  88  dimensions);  in BatchDot()
|
/external/tensorflow/tensorflow/compiler/xla/ |
D | window_util.cc |
  87  for (const auto& window_dimension : window.dimensions()) {  in ToString()
  123  for (const auto& dim : window.dimensions()) {  in HasStride()
  132  for (const auto& dim : window.dimensions()) {  in HasPadding()
  141  return std::all_of(window.dimensions().begin(), window.dimensions().end(),  in HasSymmetricPadding()
  148  return std::all_of(padding_config.dimensions().begin(),  in HasSymmetricPadding()
  149  padding_config.dimensions().end(),  in HasSymmetricPadding()
  156  return std::any_of(window.dimensions().begin(), window.dimensions().end(),  in HasNegativePadding()
  163  for (const auto& dim : window.dimensions()) {  in HasBaseDilation()
  172  for (const auto& dim : window.dimensions()) {  in HasWindowDilation()
  181  for (const auto& dim : window.dimensions()) {  in HasWindowReversal()
  [all …]
|
D | shape_util.cc |
  120  PrimitiveType element_type, tensorflow::gtl::ArraySlice<int64> dimensions,  in MakeShapeWithLayoutInternal() argument
  122  if (dimensions.size() != minor_to_major.size()) {  in MakeShapeWithLayoutInternal()
  124  dimensions.size(), minor_to_major.size());  in MakeShapeWithLayoutInternal()
  130  Shape shape = ShapeUtil::MakeShape(element_type, dimensions);  in MakeShapeWithLayoutInternal()
  163  for (int64 dimension : shape.dimensions()) {  in TrueRank()
  183  PrimitiveType element_type, tensorflow::gtl::ArraySlice<int64> dimensions) {  in MakeShape() argument
  187  PopulateShape(element_type, dimensions, &result);  in MakeShape()
  192  PrimitiveType element_type, tensorflow::gtl::ArraySlice<int64> dimensions,  in MakeShapeWithLayout() argument
  194  return MakeShapeWithLayoutInternal(element_type, dimensions, minor_to_major)  in MakeShapeWithLayout()
  199  PrimitiveType element_type, tensorflow::gtl::ArraySlice<int64> dimensions) {  in MakeShapeWithDescendingLayout() argument
  [all …]
|
D | index_util.cc |
  37  DCHECK_LT(multi_index[i], shape.dimensions(i))  in MultidimensionalIndexToLinearIndex()
  85  scale = shape.dimensions(dimension);  in MultidimensionalIndexToLinearIndex()
  89  scale *= shape.dimensions(dimension);  in MultidimensionalIndexToLinearIndex()
  115  (linear_index / divisor) % shape.dimensions(dimension);  in LinearIndexToMultidimensionalIndex()
  116  divisor *= shape.dimensions(dimension);  in LinearIndexToMultidimensionalIndex()
  124  int64 limit = shape.dimensions(dimno);  in BumpIndices()
  144  stride *= shape.dimensions(dim);  in GetDimensionStride()
  159  if (index[d] >= shape.dimensions(d)) {  in IndexInBounds()
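The matched lines show the running-scale and divisor-and-modulo pattern XLA uses to convert between multidimensional and linear indices. A stripped-down sketch of the same arithmetic for a plain row-major shape follows; this is a simplification and an assumption on my part, not the XLA helpers themselves, which also honor the layout's minor-to-major order:

    // Sketch: multidimensional <-> linear index for a row-major shape.
    #include <cstdint>
    #include <vector>

    int64_t ToLinear(const std::vector<int64_t>& dims,
                     const std::vector<int64_t>& index) {
      int64_t linear = 0;
      for (size_t i = 0; i < dims.size(); ++i) {
        linear = linear * dims[i] + index[i];  // accumulate the scale as we go
      }
      return linear;
    }

    std::vector<int64_t> ToMulti(const std::vector<int64_t>& dims,
                                 int64_t linear) {
      std::vector<int64_t> index(dims.size());
      for (size_t i = dims.size(); i-- > 0;) {
        index[i] = linear % dims[i];  // peel off the fastest-varying coordinate
        linear /= dims[i];
      }
      return index;
    }

    // Round trip for dims {2,3,4}: ToLinear({2,3,4}, {1,0,2}) == 14 and
    // ToMulti({2,3,4}, 14) == {1,0,2}.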
|
/external/javaparser/javaparser-symbol-solver-testing/src/test/test_sourcecode/javaparser_src/proper_source/com/github/javaparser/ast/expr/ |
D | ArrayCreationExpr.java |
  41  private List<Expression> dimensions;  field in ArrayCreationExpr
  63  public ArrayCreationExpr(Type type, List<Expression> dimensions, int arrayCount) {  in ArrayCreationExpr() argument
  66  setDimensions(dimensions);  in ArrayCreationExpr()
  70  …t beginColumn, int endLine, int endColumn, Type type, List<Expression> dimensions, int arrayCount)…  in ArrayCreationExpr() argument
  74  setDimensions(dimensions);  in ArrayCreationExpr()
  93  return dimensions;  in getDimensions()
  108  public void setDimensions(List<Expression> dimensions) {  in setDimensions() argument
  109  this.dimensions = dimensions;  in setDimensions()
  110  setAsParentNodeOf(this.dimensions);  in setDimensions()
|
/external/tensorflow/tensorflow/docs_src/performance/xla/ |
D | broadcasting.md |
  18  has to match at least one of the dimensions of the matrix.
  25  The matrix's dimensions are (2,3), the vector's are (3). The vector is broadcast
  49  of broadcasting dimensions. An element-wise binary operation between a scalar
  58  Most broadcasting needs can be captured by using a tuple of dimensions on a
  64  a vector of dimension (3) to a matrix of dimensions (2,3). *Without specifying
  75  a 3x3 matrix (dimensions (3,3)). There are two ways broadcasting can happen for
  95  The broadcasting dimensions can be a tuple that describes how a smaller rank
  98  dimensions 1 and 2 of the cuboid.
  109  array, by specifying which dimensions of the higher-rank array to match. For
  110  example, for an array with dimensions MxNxPxQ, a vector with dimension T can be
  [all …]
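The documentation lines matched above describe how a lower-rank operand is stretched along the dimensions it does not have. A tiny concrete illustration of the (2,3) matrix plus (3) vector case mentioned at lines 25 and 64, written as a plain loop rather than XLA's actual lowering:

    // Not XLA code: broadcasting a length-3 vector across the rows of a
    // 2x3 matrix, i.e. the vector matches dimension 1 of the matrix.
    #include <cstdio>

    int main() {
      const float matrix[2][3] = {{1, 2, 3}, {4, 5, 6}};
      const float vec[3]       = {10, 20, 30};
      float result[2][3];
      for (int i = 0; i < 2; ++i) {
        for (int j = 0; j < 3; ++j) {
          result[i][j] = matrix[i][j] + vec[j];  // vec ignores dimension 0
        }
      }
      for (int i = 0; i < 2; ++i) {
        std::printf("%g %g %g\n", result[i][0], result[i][1], result[i][2]);
      }
      return 0;
    }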
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | shape_inference.cc |
  259  const auto& dim = window.dimensions(i);  in InferWindowOutputShape()
  424  if (arg_shape->dimensions(dimension_number) !=  in InferConcatOpShape()
  425  shape->dimensions(dimension_number)) {  in InferConcatOpShape()
  441  std::vector<int64> new_dimensions(arg_shape->dimensions().begin(),  in InferConcatOpShape()
  442  arg_shape->dimensions().end());  in InferConcatOpShape()
  444  new_dimensions[dimension] += arg_shapes[i]->dimensions(dimension);  in InferConcatOpShape()
  549  std::vector<int64> dimensions(ShapeUtil::Rank(operand_shape));  in InferPadShape() local
  551  dimensions[i] = operand_shape.dimensions(i) +  in InferPadShape()
  552  padding_config.dimensions(i).edge_padding_low() +  in InferPadShape()
  553  padding_config.dimensions(i).edge_padding_high() +  in InferPadShape()
  [all …]
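The InferConcatOpShape and InferPadShape matches carry two small pieces of shape arithmetic: concatenation sums the extents along the concat dimension, and padding grows an extent by its low and high edge padding. The formula at line 553 is truncated above; the interior-padding term in the sketch below is my reading of XLA's pad semantics, not copied from the file:

    // Sketch of the output-extent arithmetic, under the assumptions above.
    #include <algorithm>
    #include <cstdint>

    // Concat along one dimension: extents simply add up.
    int64_t ConcatExtent(int64_t a, int64_t b) { return a + b; }

    // Pad one dimension: edge padding on both sides plus interior padding
    // inserted between the existing elements (assumed term).
    int64_t PaddedExtent(int64_t dim, int64_t edge_low, int64_t edge_high,
                         int64_t interior) {
      return dim + edge_low + edge_high +
             interior * std::max<int64_t>(dim - 1, 0);
    }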
|
D | hlo_constant_folding_test.cc |
  107  tensorflow::gtl::ArraySlice<int64> dimensions;  in TEST_F() member
  116  std::vector<int64> dimensions(test_config.dimensions.begin(),  in TEST_F() local
  117  test_config.dimensions.end());  in TEST_F()
  121  dimensions[test_config.concat_dimension] = csize;  in TEST_F()
  123  auto literal = Literal::CreateFromDimensions(F32, dimensions);  in TEST_F()
  128  dimensions[test_config.concat_dimension] = concat_size;  in TEST_F()
  129  Shape shape = ShapeUtil::MakeShape(F32, dimensions);  in TEST_F()
  147  const int64 dimensions[] = {11, 8, 7, 5, 9};  in TEST_F() local
  153  ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));  in TEST_F()
  173  const int64 dimensions[] = {11, 8, 7, 5, 9};  in TEST_F() local
  [all …]
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | cudnn_convolution_rewriter.cc |
  91  for (const WindowDimension& window_dim : conv->window().dimensions()) {  in MatchBackwardFilter()
  130  int64 filter_size = conv->shape().dimensions(output_spatial_dims[i]);  in MatchBackwardFilter()
  133  dim->set_stride(conv->window().dimensions(i).window_dilation());  in MatchBackwardFilter()
  136  dim->set_padding_low(conv->window().dimensions(i).padding_low());  in MatchBackwardFilter()
  139  conv->operand(0)->shape().dimensions(input_spatial_dims[i]);  in MatchBackwardFilter()
  140  int64 output_size = conv->window().dimensions(i).size();  in MatchBackwardFilter()
  229  if (kernel_spatial_dims.size() != reverse_filter->dimensions().size() ||  in MatchBackwardInput()
  232  reverse_filter->dimensions().begin())) {  in MatchBackwardInput()
  240  if (conv->window().dimensions(i).size() != 1) {  in MatchBackwardInput()
  255  for (const WindowDimension& window_dim : conv->window().dimensions()) {  in MatchBackwardInput()
  [all …]
|
D | pad_insertion.cc |
  61  std::max<int64>(0LL, conv_window.dimensions(i).padding_low()));  in MaybePaddedAndSlicedInput()
  63  std::max<int64>(0LL, conv_window.dimensions(i).padding_high()));  in MaybePaddedAndSlicedInput()
  65  conv_window.dimensions(i).base_dilation() - 1);  in MaybePaddedAndSlicedInput()
  87  std::vector<int64> limit_indices(input->shape().dimensions().begin(),  in MaybePaddedAndSlicedInput()
  88  input->shape().dimensions().end());  in MaybePaddedAndSlicedInput()
  95  std::max<int64>(0LL, -conv_window.dimensions(i).padding_low());  in MaybePaddedAndSlicedInput()
  97  std::max<int64>(0LL, -conv_window.dimensions(i).padding_high());  in MaybePaddedAndSlicedInput()
  129  conv_window.dimensions(i).window_dilation() - 1);  in MaybePaddedKernel()
  168  dim->set_size(new_kernel->shape().dimensions(  in CanonicalizeForwardConvolution()
  224  int64 padding_low = backward_conv->window().dimensions(i).padding_low();  in CanonicalizeBackwardFilterConvolution()
  [all …]
|
/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/ |
D | ir_array.cc |
  37  dims_(shape.dimensions().begin(), shape.dimensions().end()) {  in Index()
  44  int64 size_of_current_dimension = shape.dimensions(dimension);  in Index()
  72  dims_(shape.dimensions().begin(), shape.dimensions().end()) {  in Index()
  83  dims_(shape.dimensions().begin(), shape.dimensions().end()) {  in Index()
  86  linear_ = Linearize(AsInt64Slice(shape.dimensions()), ir_builder);  in Index()
  115  ShapeUtil::StripDegenerateDimensions(a).dimensions(),  in LinearValidOnShape()
  116  ShapeUtil::StripDegenerateDimensions(b).dimensions()) &&  in LinearValidOnShape()
  127  CommonFactors(AsInt64Slice(input_shape.dimensions()),  in SourceIndexOfReshape()
  128  AsInt64Slice(output_shape.dimensions()));  in SourceIndexOfReshape()
  141  AsInt64Slice(output_shape.dimensions()),  in SourceIndexOfReshape()
  [all …]
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_Squeeze.pbtxt |
  12  Contains the same data as `input`, but has one or more dimensions of
  20  If specified, only squeezes the dimensions listed. The dimension
  25  summary: "Removes dimensions of size 1 from the shape of a tensor."
  28  all dimensions of size 1 removed. If you don't want to remove all size 1
  29  dimensions, you can remove specific size 1 dimensions by specifying
  39  Or, to remove specific size 1 dimensions:
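The api_def text matched above describes the squeeze semantics in prose. A small hedged sketch of the same shape rule, not TensorFlow code, dropping every size-1 extent unless an explicit list of dimensions restricts which ones go:

    // Illustration only: the shape effect of squeeze.
    #include <algorithm>
    #include <vector>

    std::vector<int> SqueezeShape(const std::vector<int>& shape,
                                  const std::vector<int>& squeeze_dims = {}) {
      std::vector<int> out;
      for (int i = 0; i < static_cast<int>(shape.size()); ++i) {
        const bool selected =
            squeeze_dims.empty() ||
            std::count(squeeze_dims.begin(), squeeze_dims.end(), i) > 0;
        if (shape[i] == 1 && selected) continue;  // drop this size-1 extent
        out.push_back(shape[i]);
      }
      return out;
    }

    // SqueezeShape({1, 2, 1, 3, 1, 1})         -> {2, 3}
    // SqueezeShape({1, 2, 1, 3, 1, 1}, {2, 4}) -> {1, 2, 3, 1}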
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | conv_canonicalization.cc |
  61  new_input_dims[0] = input->shape().dimensions(input_batch_dim);  in Run()
  65  input->shape().dimensions(dnums.input_spatial_dimensions(i));  in Run()
  69  input->shape().dimensions(input_feature_dim);  in Run()
  84  kernel->shape().dimensions(dnums.kernel_spatial_dimensions(i));  in Run()
  88  kernel->shape().dimensions(kernel_input_feature_dim);  in Run()
  91  kernel->shape().dimensions(kernel_output_feature_dim);  in Run()
  104  new_conv_dims[0] = hlo->shape().dimensions(output_batch_dim);  in Run()
  108  hlo->shape().dimensions(dnums.output_spatial_dimensions(i));  in Run()
  111  new_conv_dims[num_dims - 1] = hlo->shape().dimensions(output_feature_dim);  in Run()
|
/external/tensorflow/tensorflow/compiler/xla/python/ |
D | numpy_bridge.cc |
  117  PyObject* dimensions;  in PyShapeInfoFromXlaShape() local
  120  dimensions = PyTuple_New(ShapeUtil::TupleElementCount(shape));  in PyShapeInfoFromXlaShape()
  123  dimensions, i,  in PyShapeInfoFromXlaShape()
  128  dimensions = PyTuple_New(rank);  in PyShapeInfoFromXlaShape()
  130  PyTuple_SET_ITEM(dimensions, i,  in PyShapeInfoFromXlaShape()
  134  return PyTuple_Pack(2, np_dtype, dimensions);  in PyShapeInfoFromXlaShape()
  255  std::vector<int64> dimensions(length);  in XlaShapeFromPyShape() local
  258  dimensions[i] = PyIntOrPyLongToLong(PyTuple_GetItem(py_dimensions, i));  in XlaShapeFromPyShape()
  259  if (dimensions[i] == -1 && PyErr_Occurred()) {  in XlaShapeFromPyShape()
  275  return ShapeUtil::MakeShapeWithLayout(element_type, dimensions,  in XlaShapeFromPyShape()
  [all …]
|