// RUN: mlir-opt %s -linalg-bufferize -std-bufferize -tensor-constant-bufferize -func-bufferize \
// RUN: -convert-linalg-to-loops -convert-linalg-to-llvm -convert-std-to-llvm | \
// RUN: mlir-cpu-runner -e main -entry-point-result=void \
// RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
// RUN: | FileCheck %s

func @main() {
  %const = constant dense<10.0> : tensor<2xf32>
  %insert_val = constant dense<20.0> : tensor<1xf32>

  // Both of these subtensor_insert ops insert into the same original tensor
  // value `%const`. This can easily cause bugs if at the memref level
  // we attempt to write in-place into the memref that %const has been
  // converted into.
  %inserted_at_position_0 = subtensor_insert %insert_val into %const[0][1][1] : tensor<1xf32> into tensor<2xf32>
  %inserted_at_position_1 = subtensor_insert %insert_val into %const[1][1][1] : tensor<1xf32> into tensor<2xf32>

  %unranked_at_position_0 = tensor_cast %inserted_at_position_0 : tensor<2xf32> to tensor<*xf32>
  call @print_memref_f32(%unranked_at_position_0) : (tensor<*xf32>) -> ()

  // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}}
  // CHECK-SAME: rank = 1 offset = 0 sizes = [2] strides = [1] data =
  // CHECK-NEXT: [20, 10]

  %unranked_at_position_1 = tensor_cast %inserted_at_position_1 : tensor<2xf32> to tensor<*xf32>
  call @print_memref_f32(%unranked_at_position_1) : (tensor<*xf32>) -> ()

  // CHECK: Unranked Memref base@ = {{0x[-9a-f]*}}
  // CHECK-SAME: rank = 1 offset = 0 sizes = [2] strides = [1] data =
  // CHECK-NEXT: [10, 20]

  return
}

func private @print_memref_f32(%ptr : tensor<*xf32>)
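
// A rough sketch (an assumption, not the verified output of the pass pipeline
// above) of what a correct bufferization of each subtensor_insert has to look
// like at the memref level: a fresh buffer is allocated and the contents of
// %const are copied into it before the small tensor is written, so the buffer
// that %const bufferizes into is never mutated in place. The value names
// (%const_buf, %insert_buf, %result_buf) are hypothetical placeholders, and
// the exact op spellings may differ between MLIR versions; shown here for the
// position-0 insertion.
//
//   %result_buf = alloc() : memref<2xf32>
//   linalg.copy(%const_buf, %result_buf) : memref<2xf32>, memref<2xf32>
//   %dest = subview %result_buf[0] [1] [1] : memref<2xf32> to memref<1xf32>
//   linalg.copy(%insert_buf, %dest) : memref<1xf32>, memref<1xf32>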