Searched refs:xindex (Results 1 – 25 of 60) sorted by relevance

/external/llvm-project/mlir/test/Conversion/ShapeToStandard/
shape-to-standard.mlir
31 // CHECK-SAME: (%[[SHAPE:.*]]: tensor<?xindex>) -> index
32 func @rank(%shape : tensor<?xindex>) -> index {
36 %rank = shape.rank %shape : tensor<?xindex> -> index
44 func @get_extent(%shape : tensor<?xindex>, %idx : !shape.size) -> !shape.size {
47 : tensor<?xindex>, !shape.size -> !shape.size
70 %shape = shape.shape_of %arg : tensor<2x3xf32> -> tensor<?xindex>
71 %result = shape.get_extent %shape, %idx : tensor<?xindex>, index -> index
79 // CHECK-SAME: (%[[EXTENTS:.*]]: tensor<?xindex>, %[[IDX:.*]]: index) -> index
80 func @get_extent_from_extent_tensor(%extents : tensor<?xindex>, %idx : index)
82 // CHECK: %[[RESULT:.*]] = extract_element %[[EXTENTS]][%[[IDX]]] : tensor<?xindex>
[all …]
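
For orientation, the ops matched above compose directly. The following is a minimal sketch assembled from the snippets shown (not a verbatim test case; the function name is hypothetical): it produces an extent tensor of type tensor<?xindex> with shape.shape_of and queries it with shape.rank and shape.get_extent.

func @extent_tensor_example(%arg : tensor<2x3xf32>, %idx : index) -> (index, index) {
  // Materialize the shape of a ranked tensor as a rank-1 tensor of index values.
  %shape = shape.shape_of %arg : tensor<2x3xf32> -> tensor<?xindex>
  // Both the rank and an individual extent come back as plain index values.
  %rank = shape.rank %shape : tensor<?xindex> -> index
  %extent = shape.get_extent %shape, %idx : tensor<?xindex>, index -> index
  return %rank, %extent : index, index
}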
convert-shape-constraints.mlir
5 // CHECK-SAME: %[[LHS:.*]]: tensor<?xindex>,
6 // CHECK-SAME: %[[RHS:.*]]: tensor<?xindex>) -> !shape.witness {
10 // CHECK: %[[LHS_RANK:.*]] = dim %[[LHS]], %[[C0]] : tensor<?xindex>
11 // CHECK: %[[RHS_RANK:.*]] = dim %[[RHS]], %[[C0]] : tensor<?xindex>
15 … %[[LESSER_RANK_OPERAND:.*]] = select %[[LHS_RANK_ULE]], %[[LHS]], %[[RHS]] : tensor<?xindex>
16 … %[[GREATER_RANK_OPERAND:.*]] = select %[[LHS_RANK_ULE]], %[[RHS]], %[[LHS]] : tensor<?xindex>
19 …TER_RANK_OPERAND_EXTENT:.*]] = extract_element %[[GREATER_RANK_OPERAND]][%[[IV]]] : tensor<?xindex>
21 …NK_OPERAND_EXTENT:.*]] = extract_element %[[LESSER_RANK_OPERAND]][%[[IVSHIFTED]]] : tensor<?xindex>
31 func @cstr_broadcastable(%arg0: tensor<?xindex>, %arg1: tensor<?xindex>) -> !shape.witness {
32 %witness = shape.cstr_broadcastable %arg0, %arg1 : tensor<?xindex>, tensor<?xindex>
/external/tensorflow/tensorflow/compiler/mlir/hlo/tests/
hlo-transform-unranked.mlir
8 %shape = shape.shape_of %a : tensor<*xf32> -> tensor<?xindex>
9 %num_elements = shape.num_elements %shape : tensor<?xindex> -> index
10 %flat_shape = tensor.from_elements %num_elements : tensor<1xindex>
12 : (tensor<*xf32>, tensor<1xindex>) -> tensor<?xf32>
19 : (tensor<?xf32>, tensor<?xindex>) -> tensor<*xf32>
30 // CHECK-NEXT: %[[SHAPE:.*]] = shape.shape_of %[[A]] : tensor<*xf32> -> tensor<?xindex>
32 // CHECK-NEXT: %[[FLAT_SHAPE:.*]] = tensor.from_elements %[[NUM_ELEMENTS]] : tensor<1xindex>
33 …"mhlo.dynamic_reshape"(%[[A]], %[[FLAT_SHAPE]]) : (tensor<*xf32>, tensor<1xindex>) -> tensor<?xf32>
35 …"mhlo.dynamic_reshape"(%[[FLAT_B]], %[[SHAPE]]) : (tensor<?xf32>, tensor<?xindex>) -> tensor<*xf32>
74 // CHECK: %[[FLAT_SHAPE:.*]] = tensor.from_elements %[[NUM_ELEMENTS]] : tensor<1xindex>
[all …]
chlo_legalize_to_hlo_broadcasts.mlir
22 …ECK: %[[RESULT_EXTENTS:.+]] = tensor.cast %[[RESULT_S]] : tensor<?xindex> to tensor<2xindex>
43 …ECK-NEXT: %[[RESULT_EXTENTS:.+]] = tensor.cast %[[RESULT_S]] : tensor<?xindex> to tensor<2xindex>
44 …oadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<?xf32>, tensor<2xindex>) -> tensor<?x?xf3…
45 …_dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<?x?xf32>, tensor<2xindex>) -> tensor<?x?xf3…
64 // CHECK: %[[RESULT_EXTENTS:.+]] = tensor.cast %[[RESULT_S]] : tensor<?xindex> to tensor<2xindex>
65 …oadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<?xf32>, tensor<2xindex>) -> tensor<?x?xf3…
66 …_dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<?x?xf32>, tensor<2xindex>) -> tensor<?x?xf3…
canonicalize.mlir
427 %0 = shape.shape_of %arg0 : tensor<?xf32> -> tensor<1xindex>
428 …oadcast_dimensions = dense<0> : tensor<1xi64> } : (tensor<?xf32>, tensor<1xindex>) -> tensor<?xf32>
437 %1 = shape.to_extent_tensor %0 : !shape.shape -> tensor<1xindex>
438 …oadcast_dimensions = dense<0> : tensor<1xi64> } : (tensor<?xf32>, tensor<1xindex>) -> tensor<?xf32>
480 func @dynamic_iota_is_static(%arg0 : tensor<1xindex>) -> tensor<4xi32> {
483 %0 = "mhlo.dynamic_iota"(%arg0) {iota_dimension = 0 : i64} : (tensor<1xindex>) -> tensor<4xi32>
488 func @dynamic_iota_broadcast(%arg0 : tensor<2xindex>) -> tensor<5x?xi32> {
490 …oadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<5xi32>, tensor<2xindex>) -> tensor<5x?xi3…
491 %0 = "mhlo.dynamic_iota"(%arg0) {iota_dimension = 0 : i64} : (tensor<2xindex>) -> tensor<5x?xi32>
498 func @dynamic_iota_broadcast_second(%arg0 : tensor<2xindex>) -> tensor<5x?xi32> {
[all …]
unfuse_batch_norm.mlir
112 // CHECK-DAG: %[[TO_DIM_TENSOR:.+]] = tensor.from_elements %[[DIM]] : tensor<1xindex>
113 … {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>, tensor<1xindex>) -> tensor<?xf32>
120 …m_elements %[[INPUT_DIM_0]], %[[INPUT_DIM_1]], %[[INPUT_DIM_2]], %[[INPUT_DIM_3]] : tensor<4xindex>
121 …oadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<?xf32>, tensor<4xindex>) -> tensor<?x?x?x…
122 …oadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<?xf32>, tensor<4xindex>) -> tensor<?x?x?x…
123 …oadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<?xf32>, tensor<4xindex>) -> tensor<?x?x?x…
124 …oadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<?xf32>, tensor<4xindex>) -> tensor<?x?x?x…
/external/llvm-project/mlir/test/Dialect/Shape/
ops.mlir
19 func @extent_tensor_num_elements(%shape : tensor<?xindex>) -> index {
21 %num_elements = shape.reduce(%shape, %init) : tensor<?xindex> -> index {
38 %1 = shape.const_shape [4, 5, 6] : tensor<?xindex>
57 func @test_broadcast_extents() -> tensor<?xindex> {
58 %0 = shape.const_shape [10, 1, 57, 92] : tensor<?xindex>
59 %1 = shape.const_shape [4, 57, 92] : tensor<?xindex>
60 %2 = shape.broadcast %0, %1 : tensor<?xindex>, tensor<?xindex> -> tensor<?xindex>
61 return %2 : tensor<?xindex>
91 %2 = shape.const_shape [1, 2, 3] : tensor<?xindex>
95 func @test_shape_of(%arg0: tensor<?xf32>) -> tensor<?xindex> {
[all …]
canonicalize.mlir
4 func @f(%arg0: tensor<2x3x4xf32>) -> tensor<?xindex> {
5 // CHECK: shape.const_shape [2, 3, 4] : tensor<?xindex>
6 %0 = shape.shape_of %arg0 : tensor<2x3x4xf32> -> tensor<?xindex>
7 return %0 : tensor<?xindex>
65 func @broadcast() -> tensor<?xindex> {
66 // CHECK: shape.const_shape [7, 2] : tensor<?xindex>
67 %0 = shape.const_shape [1, 2] : tensor<?xindex>
68 %1 = shape.const_shape [7, 1] : tensor<?xindex>
70 : tensor<?xindex>, tensor<?xindex> -> tensor<?xindex>
71 return %2 : tensor<?xindex>
[all …]
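
The canonicalize.mlir matches above exercise folding of shape.broadcast on constant extent tensors. As a self-contained sketch (assembled from those snippets, not a verbatim test case; the function name is hypothetical), the input pattern is the following; per the CHECK line above, canonicalization folds it to shape.const_shape [7, 2] : tensor<?xindex>.

func @broadcast_fold() -> tensor<?xindex> {
  // Two constant extent tensors with broadcast-compatible extents [1, 2] and [7, 1].
  %0 = shape.const_shape [1, 2] : tensor<?xindex>
  %1 = shape.const_shape [7, 1] : tensor<?xindex>
  // Broadcasting combines the extents; canonicalization folds this to [7, 2].
  %2 = shape.broadcast %0, %1
      : tensor<?xindex>, tensor<?xindex> -> tensor<?xindex>
  return %2 : tensor<?xindex>
}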
invalid.mlir
38 func @reduce_op_arg1_wrong_type(%shape : tensor<?xindex>, %init : index) {
40 %num_elements = shape.reduce(%shape, %init) : tensor<?xindex> -> index {
94 %0 = shape.shape_of %value_arg : !shape.value_shape -> tensor<?xindex>
108 func @get_extent(%arg : tensor<?xindex>) -> index {
111 %result = shape.get_extent %arg, %c0 : tensor<?xindex>, !shape.size -> index
141 func @broadcast(%arg0 : !shape.shape, %arg1 : !shape.shape) -> tensor<?xindex> {
144 : !shape.shape, !shape.shape -> tensor<?xindex>
145 return %result : tensor<?xindex>
151 func @broadcast(%arg0 : !shape.shape, %arg1 : tensor<?xindex>) -> tensor<?xindex> {
154 : !shape.shape, tensor<?xindex> -> tensor<?xindex>
[all …]
shape-to-shape.mlir
20 // CHECK-SAME: ([[ARG:%.*]]: tensor<?xindex>) -> index
21 func @num_elements_to_reduce_on_index(%shape : tensor<?xindex>) -> index {
22 %num_elements = shape.num_elements %shape : tensor<?xindex> -> index
26 // CHECK: [[NUM_ELEMENTS:%.*]] = shape.reduce([[ARG]], [[C1]]) : tensor<?xindex> -> index
/external/llvm-project/mlir/test/Dialect/Standard/
bufferize.mlir
16 …ECK-SAME: %[[DYNAMIC_EXTENT:.*]]: index) -> tensor<?xindex> {
17 // CHECK: %[[MEMREF:.*]] = alloc(%[[DYNAMIC_EXTENT]]) : memref<?xindex>
23 // CHECK: store %[[ELEM]], %[[MEMREF]][%[[I]]] : memref<?xindex>
26 // CHECK: %[[RET:.*]] = tensor_load %[[MEMREF]] : memref<?xindex>
27 // CHECK: return %[[RET]] : tensor<?xindex>
29 func @dynamic_tensor_from_elements(%arg: tensor<*xf32>, %rank: index) -> tensor<?xindex> {
34 } : tensor<?xindex>
35 return %result : tensor<?xindex>
42 … %[[DYNAMIC_EXTENT:.*]]: index) -> tensor<16x?xindex> {
43 // CHECK: %[[MEMREF:.*]] = alloc(%[[DYNAMIC_EXTENT]]) : memref<16x?xindex>
[all …]
/external/llvm-project/mlir/test/mlir-cpu-runner/
memref_reshape.mlir
31 %shape = alloc() : memref<2xindex>
34 store %c3, %shape[%c0] : memref<2xindex>
35 store %c2, %shape[%c1] : memref<2xindex>
39 : (memref<2x3xf32>, memref<2xindex>) -> ()
41 : (memref<2x3xf32>, memref<2xindex>) -> ()
43 : (memref<2x3xf32>, memref<2xindex>) -> ()
45 : (memref<2x3xf32>, memref<2xindex>) -> ()
50 %shape : memref<2xindex>) {
52 : (memref<2x3xf32>, memref<2xindex>) -> memref<?x?xf32>
64 %shape : memref<2xindex>) {
[all …]
/external/llvm-project/mlir/test/Dialect/Linalg/
sparse_2d.mlir
105 // CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
106 // CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
111 // CHECK: %[[VAL_14:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_13]]] : memref<?xindex>
113 // CHECK: %[[VAL_16:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_15]]] : memref<?xindex>
119 // CHECK: %[[VAL_23:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_21]]] : memref<?xindex>
164 // CHECK: %[[VAL_6:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
165 // CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
170 // CHECK: %[[VAL_12:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xindex>
172 // CHECK: %[[VAL_14:.*]] = load %[[VAL_6]]{{\[}}%[[VAL_13]]] : memref<?xindex>
174 // CHECK: %[[VAL_16:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref<?xindex>
[all …]
sparse_3d.mlir
112 // CHECK: %[[VAL_9:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
113 // CHECK: %[[VAL_10:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
121 // CHECK: %[[VAL_18:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_17]]] : memref<?xindex>
123 // CHECK: %[[VAL_20:.*]] = load %[[VAL_9]]{{\[}}%[[VAL_19]]] : memref<?xindex>
129 // CHECK: %[[VAL_27:.*]] = load %[[VAL_10]]{{\[}}%[[VAL_25]]] : memref<?xindex>
176 // CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
177 // CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
185 // CHECK: %[[VAL_16:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref<?xindex>
187 // CHECK: %[[VAL_18:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_17]]] : memref<?xindex>
189 // CHECK: %[[VAL_20:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_19]]] : memref<?xindex>
[all …]
sparse_1d.mlir
90 // CHECK: %[[VAL_7:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
91 // CHECK: %[[VAL_8:.*]] = alloca(%[[VAL_2]]) : memref<?xindex>
94 // CHECK: %[[VAL_11:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
95 // CHECK: %[[VAL_12:.*]] = load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
101 // CHECK: %[[VAL_19:.*]] = load %[[VAL_8]]{{\[}}%[[VAL_17]]] : memref<?xindex>
140 // CHECK: %[[VAL_4:.*]] = alloca(%[[VAL_1]]) : memref<?xindex>
141 // CHECK: %[[VAL_5:.*]] = alloca(%[[VAL_1]]) : memref<?xindex>
144 // CHECK: %[[VAL_8:.*]] = load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
145 // CHECK: %[[VAL_9:.*]] = load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
147 // CHECK: %[[VAL_11:.*]] = load %[[VAL_5]]{{\[}}%[[VAL_10]]] : memref<?xindex>
[all …]
/external/tensorflow/tensorflow/compiler/mlir/tools/kernel_gen/tests/
print_memrefs.mlir
9 %shape = alloca(%rank) : memref<?xindex>
12 store %dim, %shape[%i] : memref<?xindex>
16 %num_elem = alloca() : memref<1xindex>
17 store %c9000, %num_elem[%c0] : memref<1xindex>
19 : (memref<*xf16>, memref<1xindex>) -> memref<?xf16>
23 : (memref<?xf16>, memref<?xindex>) -> memref<*xf16>
31 // CHECK: [[SHAPE:%.*]] = alloca({{%.*}}) : memref<?xindex>
33 // CHECK: [[NUM_ELEM:%.*]] = alloca() : memref<1xindex>
37 // CHECK-SAME: : memref<1xindex> to memref<1xi64>
45 // CHECK-SAME: : memref<?xindex> to memref<?xi64>
ops.mlir
51 func @minimum_broadcast_shapes(%lhs: tensor<?xindex>, %rhs: tensor<?xindex>)
52 -> (tensor<?xindex>, tensor<?xindex>) {
54 tensor<?xindex>, tensor<?xindex> -> tensor<?xindex>, tensor<?xindex>
55 return %0, %1 : tensor<?xindex>, tensor<?xindex>
invalid.mlir
11 func @minimum_broadcast_shapes(%lhs: tensor<?xindex>, %rhs: tensor<?xindex>) {
14 tensor<?xindex>, tensor<?xindex> -> tensor<?xindex>
parallel_loops_to_sequential.mlir
7 %buf: memref<?x?xindex>) {
10 store %sum_elem, %buf[%i0, %i1] : memref<?x?xindex>
17 // CHECK: store [[SUM]], {{%.*}}{{\[}}[[I_0]], [[I_1]]] : memref<?x?xindex>
bufferize.mlir
41 // ALLOC: %[[MEM:.*]] = alloc(%[[SIZE]]) : memref<?xindex>
42 // ALLOCA: %[[MEM:.*]] = alloca(%[[SIZE]]) : memref<?xindex>
47 // CHECK: store %[[ELEM]], %[[MEM]][%[[I]]] : memref<?xindex>
55 } : tensor<?xindex>
57 %result = tensor.extract %tfe[%c0] : tensor<?xindex>
buffer_reuse.mlir
353 %arg_shape : memref<?xindex>,
354 %flat_shape : memref<1xindex>,
358 : (memref<*xi64>, memref<1xindex>) -> memref<?xi64>
374 : (memref<?xi64>, memref<?xindex>) -> memref<*xi64>
415 %0 = shape.shape_of %arg0 : memref<*xf32> -> tensor<?xindex>
416 %1 = shape.num_elements %0 : tensor<?xindex> -> index
419 %2 = alloc() : memref<1xindex>
420 store %1, %2[%c0] : memref<1xindex>
422 : (memref<*xf32>, memref<1xindex>) -> memref<?xf32>
442 %10 = tensor_to_memref %0 : memref<?xindex>
[all …]
/external/llvm/test/CodeGen/PowerPC/
2011-12-06-SpillAndRestoreCR.ll
44 %xindex.138 = phi i32 [ 0, %for.body ], [ %xindex.3.15, %for.inc15 ]
51 %xindex.234 = phi i32 [ %xindex.138, %for.cond5.preheader ], [ %xindex.3.15, %for.body7 ]
59 %xindex.3 = select i1 %cmp10, i32 %3, i32 %xindex.234
67 %xindex.3.1 = select i1 %cmp10.1, i32 %3, i32 %xindex.3
75 %xindex.3.2 = select i1 %cmp10.2, i32 %3, i32 %xindex.3.1
83 %xindex.3.3 = select i1 %cmp10.3, i32 %3, i32 %xindex.3.2
91 %xindex.3.4 = select i1 %cmp10.4, i32 %3, i32 %xindex.3.3
99 %xindex.3.5 = select i1 %cmp10.5, i32 %3, i32 %xindex.3.4
107 %xindex.3.6 = select i1 %cmp10.6, i32 %3, i32 %xindex.3.5
115 %xindex.3.7 = select i1 %cmp10.7, i32 %3, i32 %xindex.3.6
[all …]
/external/llvm-project/llvm/test/CodeGen/PowerPC/
2011-12-06-SpillAndRestoreCR.ll
44 %xindex.138 = phi i32 [ 0, %for.body ], [ %xindex.3.15, %for.inc15 ]
51 %xindex.234 = phi i32 [ %xindex.138, %for.cond5.preheader ], [ %xindex.3.15, %for.body7 ]
59 %xindex.3 = select i1 %cmp10, i32 %3, i32 %xindex.234
67 %xindex.3.1 = select i1 %cmp10.1, i32 %3, i32 %xindex.3
75 %xindex.3.2 = select i1 %cmp10.2, i32 %3, i32 %xindex.3.1
83 %xindex.3.3 = select i1 %cmp10.3, i32 %3, i32 %xindex.3.2
91 %xindex.3.4 = select i1 %cmp10.4, i32 %3, i32 %xindex.3.3
99 %xindex.3.5 = select i1 %cmp10.5, i32 %3, i32 %xindex.3.4
107 %xindex.3.6 = select i1 %cmp10.6, i32 %3, i32 %xindex.3.5
115 %xindex.3.7 = select i1 %cmp10.7, i32 %3, i32 %xindex.3.6
[all …]
/external/libjpeg-turbo/
jctrans.c
297 int blkn, ci, xindex, yindex, yoffset, blockcnt; in compress_output() local
330 for (xindex = 0; xindex < blockcnt; xindex++) in compress_output()
334 xindex = 0; in compress_output()
342 for (; xindex < compptr->MCU_width; xindex++) { in compress_output()
/external/tensorflow/tensorflow/compiler/mlir/xla/tests/
legalize-tf-binary-elementwise.mlir
50 …hape.broadcast %[[LHS_SHAPE]], %[[RHS_SHAPE]] : tensor<?xindex>, tensor<?xindex> -> tensor<?xindex>
51 …NEXT: %[[RESULT_EXTENTS:.+]] = tensor.cast %[[RESULT_SHAPE]] : tensor<?xindex> to tensor<2xindex>
219 …ape.broadcast %[[LHS_SHAPE1]], %[[RHS_SHAPE]] : tensor<?xindex>, tensor<?xindex> -> tensor<?xindex>
220 …NEXT: %[[RESULT_EXTENTS:.+]] = tensor.cast %[[RESULT_SHAPE]] : tensor<?xindex> to tensor<1xindex>
316 …pe.broadcast %[[LHS_SHAPE1]], %[[RHS_SHAPE1]] : tensor<?xindex>, tensor<?xindex> -> tensor<?xindex>
317 …NEXT: %[[RESULT_EXTENTS:.+]] = tensor.cast %[[RESULT_SHAPE]] : tensor<?xindex> to tensor<1xindex>
