/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/kernels/test/supported_features.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>

#include <gtest/gtest.h>

using namespace ::testing;
using exec_aten::IntArrayRef;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

class OpViewTest : public OperatorTest {
 protected:
  Tensor& op_view_copy_out(const Tensor& self, IntArrayRef size, Tensor& out) {
    return torch::executor::aten::view_copy_outf(context_, self, size, out);
  }

  template <class CTYPE, ScalarType DTYPE>
  void run_view_test_cases(
      const Tensor& input,
      const std::vector<std::vector<int32_t>>& out_shapes) {
    TensorFactory<DTYPE> tf;
    for (std::vector<int32_t> size : out_shapes) {
      Tensor out = tf.ones(size);

      // op_view_copy_out takes the target size as int64_t, while TensorFactory
      // sizes use int32_t, so convert from int32_t to int64_t before passing
      // the size to op_view_copy_out.
      std::vector<int64_t> size_int64_t(size.size());
      std::transform(
          size.begin(), size.end(), size_int64_t.begin(), [](int32_t x) {
            return (int64_t)x;
          });

      Tensor ret = op_view_copy_out(
          input,
          exec_aten::ArrayRef<int64_t>(
              size_int64_t.data(), size_int64_t.size()),
          out);
      EXPECT_TENSOR_EQ(out, ret);
      EXPECT_TENSOR_DATA_EQ(input, out);
    }
  }

  // Test that op_view_copy_out works for all legal input dtypes.
  template <class CTYPE, ScalarType DTYPE>
  void test_dtype() {
    TensorFactory<DTYPE> tf;
    Tensor input = tf.make(/*sizes=*/{2, 4}, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});

    // Different output shapes that meet the requirement (same numel as the
    // input).
    std::vector<std::vector<int32_t>> out_shapes = {
        {8},
        {8, 1},
        {1, 8},
        {2, 4},
        {4, 2},
        {2, 2, 2},
        {1, 2, 1, 2, 1, 2, 1},
    };

    run_view_test_cases<CTYPE, DTYPE>(input, out_shapes);
  }

  template <class CTYPE, ScalarType DTYPE>
  void test_empty_input() {
    TensorFactory<DTYPE> tf;
    Tensor input = tf.make(/*sizes=*/{3, 0, 1, 2}, /*data=*/{});

    // Different output shapes that meet the requirement (same numel as the
    // input).
    std::vector<std::vector<int32_t>> out_shapes = {
        {6, 0}, {6, 0, 0}, {3, 0, 1, 2}, {1, 0, 2, 3}};

    run_view_test_cases<CTYPE, DTYPE>(input, out_shapes);
  }

  /* %python
  import torch
  torch.manual_seed(0)
  x = torch.randint(10, (3, 4))
  res = x.view(2, 6)
  op = "op_view_copy_out"
  opt_setup_params = """
    int64_t size[] = {2, 6};
  """
  opt_extra_params = "size,"
  out_args = "out_shape, dynamism"
  dtype = "ScalarType::Int"
  check = "EXPECT_TENSOR_EQ" */

  void test_dynamic_shape(
      const std::vector<int32_t>& out_shape,
      enum torch::executor::TensorShapeDynamism dynamism) {
    /* %python
    %rewrite(unary_op) */

    TensorFactory<ScalarType::Int> tf;

    Tensor x = tf.make({3, 4}, {4, 9, 3, 0, 3, 9, 7, 3, 7, 3, 1, 6});
    Tensor expected = tf.make({2, 6}, {4, 9, 3, 0, 3, 9, 7, 3, 7, 3, 1, 6});

    int64_t size[] = {2, 6};
    Tensor out = tf.zeros(out_shape, dynamism);
    op_view_copy_out(x, size, out);
    EXPECT_TENSOR_EQ(out, expected);
  }
};

namespace {
std::vector<int64_t> vector_32_to_64(std::vector<int32_t> vector_32) {
  std::vector<int64_t> vector_64(vector_32.size());
  std::transform(
      vector_32.begin(), vector_32.end(), vector_64.begin(), [](int32_t x) {
        return (int64_t)x;
      });
  return vector_64;
}
} // namespace

// Regular tests for op_view_copy_out.
TEST_F(OpViewTest, AllDtypesSupported) {
#define TEST_ENTRY(ctype, dtype) test_dtype<ctype, ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
}

TEST_F(OpViewTest, EmptyInputSupported) {
#define TEST_ENTRY(ctype, dtype) test_empty_input<ctype, ScalarType::dtype>();
  ET_FORALL_REAL_TYPES_AND(Bool, TEST_ENTRY);
#undef TEST_ENTRY
}

TEST_F(OpViewTest, InputOutputMismatchedSizesDie) {
  TensorFactory<ScalarType::Int> tf;
  std::vector<int32_t> size_in = {3, 1, 1, 2};
  std::vector<int32_t> size_out = {3, 2, 1, 2};
  Tensor input = tf.make(size_in, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf.ones(size_out);

  // op_view_copy_out takes the target size as int64_t, while TensorFactory
  // sizes use int32_t, so convert from int32_t to int64_t before passing the
  // size to op_view_copy_out.
  std::vector<int64_t> size_int64_t = vector_32_to_64(size_out);

  // The numel of the input and output tensors must match.
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_view_copy_out(
          input,
          exec_aten::ArrayRef<int64_t>(
              size_int64_t.data(), size_int64_t.size()),
          out));
}

TEST_F(OpViewTest, SizeOutputMismatchedSizesDie) {
  TensorFactory<ScalarType::Int> tf;
  std::vector<int32_t> size = {3, 1, 1, 2};
  std::vector<int32_t> size_target = {3, 2, 1, 2};
  Tensor input = tf.make(size, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf.ones(size);

  // op_view_copy_out takes the target size as int64_t, while TensorFactory
  // sizes use int32_t, so convert from int32_t to int64_t before passing the
  // size to op_view_copy_out.
  std::vector<int64_t> size_int64_t = vector_32_to_64(size_target);

  // The target size and out.size() must match.
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_view_copy_out(
          input,
          exec_aten::ArrayRef<int64_t>(
              size_int64_t.data(), size_int64_t.size()),
          out));
}

TEST_F(OpViewTest, MismatchedTypesDie) {
  TensorFactory<ScalarType::Int> tf_in;
  TensorFactory<ScalarType::Float> tf_out;
  std::vector<int32_t> size = {3, 1, 1, 2};
  Tensor input = tf_in.make(size, /*data=*/{1, 2, 3, 4, 5, 6});
  Tensor out = tf_out.ones(size);

  // op_view_copy_out takes the target size as int64_t, while TensorFactory
  // sizes use int32_t, so convert from int32_t to int64_t before passing the
  // size to op_view_copy_out.
  std::vector<int64_t> size_int64_t = vector_32_to_64(size);

  // The dtype of input and output must match.
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_view_copy_out(
          input,
          exec_aten::ArrayRef<int64_t>(
              size_int64_t.data(), size_int64_t.size()),
          out));
}

TEST_F(OpViewTest, SizeInfer) {
  TensorFactory<ScalarType::Int> tf_in;
  TensorFactory<ScalarType::Int> tf_out_valid, tf_out_invalid;
  std::vector<int32_t> in_size = {2, 2, 2};
  std::vector<int32_t> out_size_view = {4, 2};
  std::vector<int32_t> out_size_valid = {-1, 2};
  std::vector<int32_t> out_size_invalid = {-1, -1};
  Tensor input = tf_in.make(in_size, /*data=*/{1, 2, 3, 4, 5, 6, 7, 8});
  Tensor out = tf_out_valid.ones(out_size_view);

  // op_view_copy_out takes the target size as int64_t, while TensorFactory
  // sizes use int32_t, so convert from int32_t to int64_t before passing the
  // size to op_view_copy_out.
  std::vector<int64_t> valid_size_int64_t = vector_32_to_64(out_size_valid);
  std::vector<int64_t> invalid_size_int64_t =
      vector_32_to_64(out_size_invalid);

  // Inferring one dimension is valid.
  op_view_copy_out(
      input,
      exec_aten::ArrayRef<int64_t>(
          valid_size_int64_t.data(), valid_size_int64_t.size()),
      out);
  EXPECT_TENSOR_DATA_EQ(input, out);

  // Inferring two dimensions is invalid.
  ET_EXPECT_KERNEL_FAILURE(
      context_,
      op_view_copy_out(
          input,
          exec_aten::ArrayRef<int64_t>(
              invalid_size_int64_t.data(), invalid_size_int64_t.size()),
          out));
}

#if !defined(USE_ATEN_LIB)
TEST_F(OpViewTest, UpperBoundOutTensor) {
  TensorFactory<ScalarType::Int> tf;
  Tensor input = tf.make(/*sizes=*/{2, 4}, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});
  Tensor output = tf.zeros(
      /*sizes=*/{2, 2, 2},
      torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);

  std::vector<int32_t> size = {2, 2, 2};
  Tensor ref_output = tf.make(size, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});

  std::vector<int64_t> size_int64_t(size.size());
  std::transform(size.begin(), size.end(), size_int64_t.begin(), [](int32_t x) {
    return (int64_t)x;
  });

  op_view_copy_out(
      input,
      exec_aten::ArrayRef<int64_t>(size_int64_t.data(), size_int64_t.size()),
      output);
  EXPECT_TENSOR_EQ(ref_output, output);

  output = tf.zeros(
      /*sizes=*/{1, 4, 2},
      torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  size = std::vector<int32_t>({1, 4, 2});
  ref_output = tf.make(size, /*data=*/{0, 1, 1, 1, 0, 1, 0, 1});

  size_int64_t = std::vector<int64_t>(size.size());
  std::transform(size.begin(), size.end(), size_int64_t.begin(), [](int32_t x) {
    return (int64_t)x;
  });
  // Let the kernel infer the middle dimension.
  size_int64_t[1] = -1;

  op_view_copy_out(
      input,
      exec_aten::ArrayRef<int64_t>(size_int64_t.data(), size_int64_t.size()),
      output);
  EXPECT_TENSOR_EQ(ref_output, output);
}
#endif

TEST_F(OpViewTest, DynamicShapeUpperBoundSameAsExpected) {
  test_dynamic_shape(
      {2, 6}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
}

TEST_F(OpViewTest, DynamicShapeUpperBoundLargerThanExpected) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape not supported";
  }
  test_dynamic_shape(
      {10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
}

TEST_F(OpViewTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape not supported";
  }
  test_dynamic_shape(
      {1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
}