/external/llvm-project/mlir/test/Dialect/Shape/

D | ops.mlir |
      7  func @shape_num_elements(%shape : !shape.shape) -> !shape.size {
      8    %init = shape.const_size 1
      9    %num_elements = shape.reduce(%shape, %init) : !shape.shape -> !shape.size {
     10      ^bb0(%index : index, %extent : !shape.size, %acc : !shape.size):
     11        %acc_next = shape.mul %acc, %extent
     12            : !shape.size, !shape.size -> !shape.size
     13        shape.yield %acc_next : !shape.size
     15    return %num_elements : !shape.size
     19  func @extent_tensor_num_elements(%shape : tensor<?xindex>) -> index {
     21    %num_elements = shape.reduce(%shape, %init) : tensor<?xindex> -> index {
    [all …]
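A note on the test above: shape.reduce is used here to compute the product of a
shape's extents. As a hedged Python analogue of the arithmetic the reduction
performs (an illustration only, not the MLIR semantics):

    from functools import reduce
    from operator import mul

    def num_elements(extents):
        # Fold the extents with multiplication, starting from the
        # initial accumulator const_size 1, exactly as the reduce
        # body above multiplies %acc by each %extent.
        return reduce(mul, extents, 1)

    assert num_elements([2, 3, 4]) == 24
    assert num_elements([]) == 1  # a rank-0 shape has one element
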
D | canonicalize.mlir |
      5  // CHECK: shape.const_shape [2, 3, 4] : tensor<?xindex>
      6  %0 = shape.shape_of %arg0 : tensor<2x3x4xf32> -> tensor<?xindex>
     14  func @f() -> (!shape.shape, !shape.shape) {
     15    // CHECK: shape.const_shape [2, 3] : !shape.shape
     16    // CHECK: shape.const_shape [4, 5] : !shape.shape
     18    %0 = shape.const_shape [2, 3, 4, 5] : !shape.shape
     19    %head, %tail = "shape.split_at"(%0, %c2) : (!shape.shape, i32) -> (!shape.shape, !shape.shape)
     20    return %head, %tail : !shape.shape, !shape.shape
     28  func @f() -> (!shape.shape, !shape.shape) {
     29    // CHECK: shape.const_shape [2, 3, 4] : !shape.shape
    [all …]
D | invalid.mlir |
      3  func @reduce_op_args_num_mismatch(%shape : !shape.shape, %init : !shape.size) {
      5    %num_elements = shape.reduce(%shape, %init) : !shape.shape -> !shape.size {
      6      ^bb0(%index: index, %dim: !shape.size):
      7        shape.yield %dim : !shape.size
     14  func @reduce_op_arg0_wrong_type(%shape : !shape.shape, %init : !shape.size) {
     16    %num_elements = shape.reduce(%shape, %init) : !shape.shape -> !shape.size {
     17      ^bb0(%index: f32, %dim: !shape.size, %acc: !shape.size):
     18        %new_acc = "shape.add"(%acc, %dim)
     19            : (!shape.size, !shape.size) -> !shape.size
     20        shape.yield %new_acc : !shape.size
    [all …]
D | remove-shape-constraints.mlir |
      1  // RUN: mlir-opt -allow-unregistered-dialect -split-input-file -remove-shape-constraints -canonical…
      2  // RUN: mlir-opt -allow-unregistered-dialect -split-input-file -remove-shape-constraints <%s | File…
      8  func @f(%arg0 : !shape.shape, %arg1 : !shape.shape) -> index {
      9    // REPLACE-NEXT: %[[WITNESS:.+]] = shape.const_witness true
     10    // REPLACE-NOT: shape.cstr_eq
     11    // REPLACE: shape.assuming %[[WITNESS]]
     14    %0 = shape.cstr_broadcastable %arg0, %arg1 : !shape.shape, !shape.shape
     15    %1 = shape.assuming %0 -> index {
     17      shape.assuming_yield %2 : index
     26  func @f(%arg0 : !shape.shape, %arg1 : !shape.shape) -> index {
    [all …]
D | shape-to-shape.mlir |
      1  // RUN: mlir-opt -shape-to-shape-lowering -split-input-file %s | FileCheck %s
      4  // CHECK-SAME: ([[ARG:%.*]]: !shape.shape) -> !shape.size
      5  func @num_elements_to_reduce(%shape : !shape.shape) -> !shape.size {
      6    %num_elements = shape.num_elements %shape : !shape.shape -> !shape.size
      7    return %num_elements : !shape.size
      9  // CHECK: [[C1:%.*]] = shape.const_size 1
     10  // CHECK: [[NUM_ELEMENTS:%.*]] = shape.reduce([[ARG]], [[C1]]) : !shape.shape -> !shape.size
     11  // CHECK: ^bb0({{.*}}: index, [[DIM:%.*]]: !shape.size, [[ACC:%.*]]: !shape.size
     12  // CHECK: [[NEW_ACC:%.*]] = shape.mul [[DIM]], [[ACC]]
     13  // CHECK: shape.yield [[NEW_ACC]] : !shape.size
    [all …]
/external/tensorflow/tensorflow/compiler/xla/service/

D | shape_inference.h |
     16  // Shape inference is used by the XLA service as the user builds up
     35  // For a given operation and input shapes, infers what the resulting shape is
     38  // the shape that results from an operation is inferred. Some methods have
     39  // overloads for inferring shape at the HLO level.
     41  // TODO(b/73352135): Shape inference does not issue very good error messages, in
     42  // part because HloInstruction::ToString() is not available since shape
     47  // Infers the shape produced by applying the given unary operation to the
     48  // given input shape.
     49  static StatusOr<Shape> InferUnaryOpShape(HloOpcode opcode,
     50                                           const Shape& shape);
    [all …]
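As the comments at lines 35-50 describe, each Infer*OpShape method maps an
opcode plus input shapes to a result shape. A toy Python sketch of the unary
case (my own illustration of the pattern, not XLA's actual rules):

    def infer_unary_op_shape(opcode, shape):
        # Elementwise unary ops produce a result with exactly the
        # input's shape; an unknown opcode is an error, not a guess.
        elementwise = {"negate", "exp", "log", "tanh", "abs"}
        if opcode in elementwise:
            return shape
        raise ValueError("no inference rule for opcode %r" % opcode)

    assert infer_unary_op_shape("negate", (2, 3)) == (2, 3)
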
D | hlo_creation_utils.cc |
     40    TF_ASSIGN_OR_RETURN(Shape unary_op_shape,  [in MakeUnaryHlo()]
     50    TF_ASSIGN_OR_RETURN(Shape binary_op_shape,  [in MakeBinaryHlo()]
     62        Shape binary_op_shape,  [in MakeCompareHlo()]
     74        Shape pad_shape,  [in MakePadHlo()]
     75        ShapeInference::InferPadShape(operand->shape(), padding_value->shape(),
     86    TF_ASSIGN_OR_RETURN(Shape slice_shape, ShapeInference::InferSliceShape(  [in MakeSliceHlo()]
     87                                               operand->shape(), start_indices,
    102        Shape convolve_shape,  [in MakeConvolveHlo()]
    104        lhs->shape(), rhs->shape(), feature_group_count, batch_group_count,
    115        Shape transpose_shape,  [in MakeTransposeHlo()]
    [all …]
/external/tensorflow/tensorflow/compiler/xla/

D | shape_util.h |
     32  #include "tensorflow/compiler/xla/shape.h"
     49  // An index for specifying a particular nested subshape within a shape. Used in
     53  // shape. For a non-nested tuple, an index has a single element. For example,
    166  // Returns true if this shape index starts with 'prefix'.
    179  // Namespaced collection of (static) shape utilities.
    185  // Data structure which describes the coordinates and the shape, of a tuple
    186  // shaped sub-shape.
    189  IndexedShape(ShapeIndex index, Shape shape)
    190      : index(std::move(index)), shape(std::move(shape)) {}
    192  Shape shape;
    [all …]
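The ShapeIndex comments above describe addressing a nested subshape by a
sequence of tuple-element positions. A hedged Python sketch of that navigation,
with plain tuples standing in for XLA's Shape (an assumption for illustration):

    def get_subshape(shape, index):
        # Descend one tuple level per index element; a non-nested
        # tuple needs a single-element index, as the comment says.
        for i in index:
            shape = shape[i]
        return shape

    nested = ((("f32", (2, 3)), ("s32", ())),  # element 0 is itself a tuple
              ("f32", (8,)))
    assert get_subshape(nested, [0, 1]) == ("s32", ())
    assert get_subshape(nested, [1]) == ("f32", (8,))
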
D | shape_util.cc |
    118  // Constructs and returns the new shape with the given minor_to_major order in
    120  StatusOr<Shape> MakeShapeWithLayoutInternal(
    132    TF_ASSIGN_OR_RETURN(Shape shape,
    139    *shape.mutable_layout() = LayoutUtil::MakeLayout(
    141    if (!shape.has_layout()) {
    142      return InvalidArgument("Shape has no layout.");
    144    TF_RETURN_IF_ERROR(ShapeUtil::ValidateShape(shape));
    145    return shape;
    149  /* static */ bool ShapeUtil::Equal(const Shape& lhs, const Shape& rhs) {
    150    bool equal = Shape::Equal()(lhs, rhs);
    [all …]
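MakeShapeWithLayoutInternal attaches a minor_to_major order to a freshly built
shape. As a hedged aside on what that order means (my own sketch, not XLA
code), element strides fall out of it directly:

    def strides_from_minor_to_major(dims, minor_to_major):
        # minor_to_major lists dimension numbers from fastest- to
        # slowest-varying; [1, 0] is row-major for a 2-D shape.
        strides = [0] * len(dims)
        running = 1
        for dim in minor_to_major:
            strides[dim] = running
            running *= dims[dim]
        return strides

    assert strides_from_minor_to_major([2, 3], [1, 0]) == [3, 1]  # row-major
    assert strides_from_minor_to_major([2, 3], [0, 1]) == [1, 2]  # column-major
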
D | layout_util_test.cc |
     29  Shape MakeShapeWithLayout(PrimitiveType element_type,
     32    Shape shape = ShapeUtil::MakeShape(element_type, dimensions);
     33    *shape.mutable_layout() = LayoutUtil::MakeLayout(minor_to_major);
     34    return shape;
     39    Shape shape =  [in TEST_F()]
     41    Shape other_shape =
     44    Shape tuple0 = ShapeUtil::MakeTupleShape({});
     45    Shape tuple1 = ShapeUtil::MakeTupleShape({shape});
     46    Shape tuple2 = ShapeUtil::MakeTupleShape({shape, shape});
     58    Shape other_tuple2 = ShapeUtil::MakeTupleShape({shape, other_shape});
    [all …]
/external/gemmlowp/test/

D | benchmark_meta_gemm.cc |
     64  struct Shape {
     73    Shape(std::int32_t n, std::int32_t m, std::int32_t k)
    104  double run_gemms(std::vector<Shape>* shapes) {
    106    for (auto& shape : *shapes) {
    107      ops += run_gemm(shape.n, shape.m, shape.k, shape.working_set().lhs,
    108                      shape.working_set().rhs, shape.working_set().result);
    159  void time_all(std::vector<Shape>* shapes, std::int32_t repetitions,
    179  void time_one(Shape* shape, double max_time) {
    184    std::cout << std::setprecision(6) << std::fixed << shape->n << ", "
    185              << shape->m << ", " << shape->k << ", " << std::flush;
    [all …]
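The benchmark accumulates ops across GEMMs of each (n, m, k) shape; the exact
accounting lives in the elided run_gemm. As a hedged aside, matrix-multiply
work is conventionally counted as 2*n*m*k flops, one multiply and one add per
inner-product term:

    def gemm_flops(n, m, k):
        # n*m output elements, each a k-term dot product costing a
        # multiply and an add.
        return 2 * n * m * k

    assert gemm_flops(1000, 1000, 1000) == 2_000_000_000  # ~2 Gflop per GEMM
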
/external/tensorflow/tensorflow/lite/delegates/gpu/common/

D | convert.cc |
     28  #include "tensorflow/lite/delegates/gpu/common/shape.h"
     43  absl::Status ConvertToPHWO4I4(absl::Span<const float> in, const OHWI& shape,
     45    if (in.size() != shape.DimensionsProduct()) {
     48          in.size(), " != ", shape.DimensionsProduct()));
     50    if (out.size() != GetElementsSizeForPHWO4I4(shape)) {
     53          out.size(), " != ", GetElementsSizeForPHWO4I4(shape)));
     57    for (int p = 0; p < DivideRoundUp(shape.o, kPhwo4i4ChannelsInPlane); ++p) {
     58      for (int h = 0; h < shape.h; ++h) {
     59        for (int w = 0; w < shape.w; ++w) {
     60          for (int c = 0; c < DivideRoundUp(shape.i, kPhwo4i4ChannelsInPlane);
    [all …]
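The loop bounds above round the O and I channel counts up to whole 4-channel
planes. A hedged Python sketch of that bookkeeping (divide_round_up mirrors
DivideRoundUp; the padded-size formula illustrates the idea and is not
necessarily GetElementsSizeForPHWO4I4's exact definition):

    def divide_round_up(n, divisor):
        # Ceiling division: 5 channels need 2 planes of 4.
        return (n + divisor - 1) // divisor

    def padded_phwo4i4_size(o, h, w, i, channels_per_plane=4):
        # Both O and I are padded to whole planes, so a partially
        # filled plane still occupies full storage.
        return (divide_round_up(o, channels_per_plane) * channels_per_plane
                * h * w
                * divide_round_up(i, channels_per_plane) * channels_per_plane)

    assert divide_round_up(5, 4) == 2
    assert padded_phwo4i4_size(o=5, h=3, w=3, i=6) == 8 * 3 * 3 * 8
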
/external/llvm-project/mlir/docs/

D | ShapeInference.md |
      1  # Shape Inference
      3  Shape inference as discussed here is considered a specific instance of type
      6  dimensions. While some operations have no compile time fixed shape (e.g., output
      7  shape is dictated by data) we could still have some knowledge of
     11  shape.
     15  `InferShapedTypeOpInterface` is used to implement the shape and element type
     16  inference. The return type can often be deduced from the deduced return shape
     20  ## Shape functions
     22  The C++ interfaces are the base mechanism whereby shape inference is queried and
     23  executed, but not the intended way to specify shape constraints in general.
    [all …]
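The doc separates the C++ interfaces that query shape inference from the shape
functions that encode per-op constraints. A hedged Python sketch of that
separation, with a toy registry mapping op names to shape functions (my own
illustration of the pattern, not MLIR's implementation):

    SHAPE_FUNCTIONS = {}

    def shape_function(op_name):
        # Register a per-op shape function, keeping constraint logic
        # separate from the machinery that queries it.
        def register(fn):
            SHAPE_FUNCTIONS[op_name] = fn
            return fn
        return register

    @shape_function("transpose")
    def transpose_shape(operand_shapes, perm):
        (shape,) = operand_shapes
        return tuple(shape[d] for d in perm)

    def infer_shape(op_name, operand_shapes, **attrs):
        return SHAPE_FUNCTIONS[op_name](operand_shapes, **attrs)

    assert infer_shape("transpose", [(2, 3, 4)], perm=(2, 0, 1)) == (4, 2, 3)
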
/external/llvm-project/mlir/include/mlir/Dialect/Shape/IR/

D | ShapeOps.td |
      1  //===- Shape.td - Shape operations definition --------------*- tablegen -*-===//
      9  // This is the operation definition file for Shape dialect operations.
     16  include "mlir/Dialect/Shape/IR/ShapeBase.td"
     24  // Shape op definitions
     51  let summary = "Returns the broadcasted output shape of two inputs";
     53    Returns the broadcasted shape for two input shapes or extent tensors. Both
     54    operands can be of type `shape.shape` or `tensor<?xindex>`. The result is of
     55    type `shape.shape` and, if both operands are tensors, may be of type
     59    with 1's from the left. The resulting broadcasted shape is then defined as
     65    In case the resulting shape is undefined, i.e. if corresponding extents are
    [all …]
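Lines 59-65 describe the usual broadcasting rule: left-pad the shorter shape
with 1's, combine extents pairwise, and fail when two differing extents are
both greater than 1. A hedged Python sketch of that rule (the numpy-style
convention the description matches, not the dialect's own code):

    def broadcast(lhs, rhs):
        # Left-pad the shorter shape with 1's, then take the larger
        # extent wherever the other is 1; differing extents > 1 clash.
        rank = max(len(lhs), len(rhs))
        lhs = (1,) * (rank - len(lhs)) + tuple(lhs)
        rhs = (1,) * (rank - len(rhs)) + tuple(rhs)
        out = []
        for a, b in zip(lhs, rhs):
            if a == b or b == 1:
                out.append(a)
            elif a == 1:
                out.append(b)
            else:
                raise ValueError("incompatible extents %d and %d" % (a, b))
        return tuple(out)

    assert broadcast((2, 3, 4), (3, 4)) == (2, 3, 4)
    assert broadcast((2, 1, 4), (5, 1)) == (2, 5, 4)
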
D | ShapeBase.td |
      9  // Base definitions for the `shape` dialect.
     19  // Shape Inference dialect definitions
     23  let name = "shape";
     25  let summary = "Types and operations for shape dialect";
     27    This dialect contains operations for shape inference.
     29    Note: Unless explicitly stated, all functions that return a shape and take
     30    shapes as input, return the invalid shape if one of its operands is an
     31    invalid shape. This avoids flagging multiple errors for one verification
     37  let cppNamespace = "::mlir::shape";
     43    CPred<"$_self.isa<::mlir::shape::ComponentType>()">, "component type">,
    [all …]
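The note at lines 29-31 is an error-propagation convention: once one operand is
the invalid shape, shape-returning functions pass it through rather than raise
a second diagnostic. A hedged sketch of that convention (the None sentinel is
my own stand-in for the dialect's invalid shape):

    INVALID = None  # stand-in for the dialect's invalid shape

    def shape_concat(lhs, rhs):
        # Propagate invalid inputs so one bad operand yields one
        # diagnostic, not a cascade of follow-on errors.
        if lhs is INVALID or rhs is INVALID:
            return INVALID
        return lhs + rhs

    bad = shape_concat(INVALID, (2, 3))
    assert shape_concat(bad, (4,)) is INVALID  # still the one original error
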
/external/tensorflow/tensorflow/java/src/main/java/org/tensorflow/

D | Shape.java |
     20  /** The possibly partially known shape of a tensor produced by an operation. */
     21  public final class Shape {
     23    /** Create a Shape representing an unknown number of dimensions. */
     24    public static Shape unknown() {
     25      return new Shape(null);
     28    /** Create a Shape representing a scalar value. */
     29    public static Shape scalar() {
     30      return new Shape(new long[0]);
     34    /**
     35     * Create a Shape representing an N-dimensional value.
     36     * <p>Creates a Shape representing an N-dimensional value (N being at least 1), with the provided
    [all …]
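Shape.unknown() and Shape.scalar() mirror the distinction TensorFlow's Python
API draws with tf.TensorShape; for comparison, a hedged Python parallel (the
Python analogue, not part of this Java file):

    import tensorflow as tf

    unknown = tf.TensorShape(None)      # unknown number of dimensions
    scalar = tf.TensorShape([])         # rank 0, like Shape.scalar()
    matrix = tf.TensorShape([3, None])  # rank 2, second dimension unknown

    assert unknown.rank is None
    assert scalar.rank == 0
    assert matrix.as_list() == [3, None]
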
/external/tensorflow/tensorflow/compiler/mlir/xla/ir/

D | mlir_hlo_builder.h |
     30  #include "tensorflow/compiler/xla/shape.h"
    101    // Returns the shape of the given op.
    102    StatusOr<const Shape*> GetShapePtr(XlaOp op) const override;
    114        const Shape& shape, XlaOp lhs, XlaOp rhs, const Window& window,
    123    StatusOr<XlaOp> FftInternal(const Shape& shape, XlaOp operand,
    128        const Shape& shape, XlaOp a, XlaOp b,
    131    StatusOr<XlaOp> CholeskyInternal(const Shape& shape, XlaOp a,
    136        const Shape& shape, const string& opaque,
    137        absl::optional<absl::Span<const Shape>> operand_shapes_with_layout,
    144        const Shape& shape, absl::Span<const XlaOp> all_operands,
    [all …]
/external/tensorflow/tensorflow/python/ops/

D | init_ops_v2_test.py |
     40                  shape=None,
     42      if shape is None:
     43        shape = [100]
     44      t1 = self.evaluate(init1(shape, dtype))
     45      t2 = self.evaluate(init2(shape, dtype))
     46      self.assertEqual(tensor_shape.as_shape(shape), t1.shape)
     47      self.assertEqual(tensor_shape.as_shape(shape), t2.shape)
     50    def _duplicated_test(self, init, shape=None, dtype=dtypes.float32):
     51      if shape is None:
     52        shape = [100]
    [all …]
D | init_ops_v2.py |
     46    def __call__(self, shape, dtype=None, **kwargs):
     47      # returns a tensor of shape `shape` and dtype `dtype`
     52    def __call__(self, shape, dtype=None, **kwargs):
     56        shape: Shape of the tensor.
     61          partition in a partitioned variable. `partition_shape` is the shape of
     62          the partition (i.e. the shape of the returned tensor) and
     64          partition w.r.t each axis. For example, a tensor of shape `(30, 100)`
     65          can be partitioned into two partitions: `p0` of shape `(10, 100)` and
     66          `p1` of shape `(20, 100)`; if the initializer is called with
    116      the Initializer object, without knowing the shape and dtype of the variable
    [all …]
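The docstring above describes the initializer protocol: the object is
configured without a shape, then called with shape and dtype when the variable
is materialized. A short usage sketch of that calling convention with a stock
TF2 initializer (a hedged example, not this module's code):

    import tensorflow as tf

    # The object carries configuration only; no shape is known yet.
    init = tf.random_normal_initializer(mean=0.0, stddev=0.05)

    # Shape and dtype arrive at call time, when the variable is built.
    weights = init(shape=(30, 100), dtype=tf.float32)
    assert weights.shape == (30, 100)

    # The same configured object is reusable for other shapes.
    bias = init(shape=(100,), dtype=tf.float32)
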
D | random_ops.py |
     47  def random_normal(shape,
     59      <tf.Tensor: shape=(4,), dtype=float32, numpy=..., dtype=float32)>
     65      <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
     74      shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
     87      A tensor of the specified shape filled with random normal values.
     89    with ops.name_scope(name, "random_normal", [shape, mean, stddev]) as name:
     90      shape_tensor = tensor_util.shape_tensor(shape)
     98      tensor_util.maybe_set_static_shape(value, shape)
    105  def parameterized_truncated_normal(shape,
    120      shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
    [all …]
D | stateless_random_ops.py |
     66       [ 10670227 -246211131]], shape=(3, 2), dtype=int32)
     67    >>> tf.random.stateless_normal(shape=[3], seed=new_seeds[0, :])
     68    <tf.Tensor: shape=(3,), dtype=float32, numpy=array([-0.59835213, -0.9578608 ,
     72      seed: an RNG seed (a tensor with shape [2] and dtype `int32` or
     78      A tensor with shape [num, 2] representing `num` new seeds. It will have the
     83    return stateless_random_uniform(shape=[num, 2], seed=seed, dtype=seed.dtype,
    103    tf.Tensor([1105988140 3], shape=(2,), dtype=int32)
    104    >>> tf.random.stateless_normal(shape=[3], seed=replica_seed)
    105    <tf.Tensor: shape=(3,), dtype=float32, numpy=array([0.03197195, 0.8979765 ,
    109      seed: an RNG seed (a tensor with shape [2] and dtype `int32` or
    [all …]
/external/tensorflow/tensorflow/python/grappler/

D | datasets_test.py |
     15  """Tests for the datasets shape inference."""
     39        'shape': tensor_shape.TensorShape([])
     42        'shape': tensor_shape.TensorShape([3])
     45        'shape': tensor_shape.TensorShape([1, 3])
     58        self.assertEqual(test_case['shape'],
     59                         op_properties['IteratorGetNext'][0].shape)
     64        'shape': tensor_shape.TensorShape([])
     67        'shape': tensor_shape.TensorShape([3])
     70        'shape': tensor_shape.TensorShape([1, 3])
     83        self.assertEqual(test_case['shape'],
    [all …]
/external/tensorflow/tensorflow/python/keras/engine/

D | input_spec.py |
     35    """Specifies the rank, dtype and shape of every input to a layer.
     40    compatibility checks for input structure, input rank, input shape, and
     43    A None entry in a shape is compatible with any dimension,
     44    a None shape is compatible with any shape.
     48      shape: Shape tuple, expected shape of the input
     67    # The layer will accept inputs with shape (?, 28, 28) & (?, 28, 28, 1)
     70        shape=(None, 28, 28, 1),
     77                 shape=None,
     85      shape = tensor_shape.TensorShape(shape)
     86      if shape.rank is None:
    [all …]
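Lines 43-44 state the wildcard rule InputSpec checks against: None matches
anything, at the level of a single dimension or of the whole shape. A hedged
Python condensation of exactly that rule (not Keras's checking code):

    def shape_compatible(spec_shape, input_shape):
        # A None spec shape accepts any shape; a None entry accepts
        # any extent in that position; otherwise extents must match.
        if spec_shape is None:
            return True
        if len(spec_shape) != len(input_shape):
            return False
        return all(s is None or s == d
                   for s, d in zip(spec_shape, input_shape))

    assert shape_compatible(None, (7, 5))
    assert shape_compatible((None, 28, 28, 1), (32, 28, 28, 1))
    assert not shape_compatible((None, 28, 28, 1), (32, 28, 28, 3))
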
/external/tflite-support/tensorflow_lite_support/java/src/java/org/tensorflow/lite/support/tensorbuffer/

D | TensorBuffer.java |
     33    /** Shape of the tensor stored in this buffer. */
     34    protected int[] shape;
     46    /**
      *    Creates a {@link TensorBuffer} with specified {@code shape} and {@link DataType}. Here are some
     50     * Creating a float TensorBuffer with shape {2, 3}:
     51     *   int[] shape = new int[] {2, 3};
     52     *   TensorBuffer tensorBuffer = TensorBuffer.createFixedSize(shape, DataType.FLOAT32);
     57     *   int[] shape = new int[] {};
     58     *   TensorBuffer tensorBuffer = TensorBuffer.createFixedSize(shape, DataType.UINT8);
     63     *   int[] shape = new int[] {0};
     64     *   TensorBuffer tensorBuffer = TensorBuffer.createFixedSize(shape, DataType.UINT8);
    [all …]
/external/tensorflow/tensorflow/python/tpu/

D | tpu_sharding.py |
    164    def get_unpartitioned_shape(self, shape):
    165      """Returns the shape of an unpartitioned Tensor.
    167      When given the shape of a 'sharded-size' Tensor, returns the full
    168      shape of its unpartitioned Tensor.
    171        shape: The shape of the sharded Tensor.
    174        The shape of the unpartitioned version of the Tensor.
    177        ValueError: if shape has unknown sharded dimension
    179      shape = tensor_shape.as_shape(shape)
    180      dims = shape.as_list()
    185        raise ValueError("shape %s must have a fixed size for dimension %d "
    [all …]
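get_unpartitioned_shape inverts sharding: the per-shard extent along the
sharded dimension scales back up by the number of shards. A hedged Python
simplification of that inverse (the real method also validates unknown
dimensions, per the ValueError above):

    def get_unpartitioned_shape(sharded_dims, shard_dimension, number_of_shards):
        # Recover the full shape by multiplying the sharded
        # dimension's per-shard extent by the shard count.
        if sharded_dims[shard_dimension] is None:
            raise ValueError(
                "shape %s must have a fixed size for dimension %d"
                % (sharded_dims, shard_dimension))
        dims = list(sharded_dims)
        dims[shard_dimension] *= number_of_shards
        return dims

    # Four shards, each holding a (32, 128) slice sharded along axis 0:
    assert get_unpartitioned_shape([32, 128], 0, 4) == [128, 128]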