• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) Meta Platforms, Inc. and affiliates.
3  * All rights reserved.
4  *
5  * This source code is licensed under the BSD-style license found in the
6  * LICENSE file in the root directory of this source tree.
7  */
8 
9 #include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
10 #include <executorch/kernels/test/TestUtil.h>
11 #include <executorch/kernels/test/supported_features.h>
12 #include <executorch/runtime/core/exec_aten/exec_aten.h>
13 #include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
14 #include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
15 #include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
16 #include <gtest/gtest.h>
17 #include <cmath>
18 
19 using namespace ::testing;
20 using exec_aten::ArrayRef;
21 using exec_aten::optional;
22 using exec_aten::ScalarType;
23 using exec_aten::Tensor;
24 using torch::executor::testing::TensorFactory;
25 
26 class OpLogicalNotOutTest : public OperatorTest {
27  protected:
op_logical_not_out(const Tensor & input,Tensor & out)28   Tensor& op_logical_not_out(const Tensor& input, Tensor& out) {
29     return torch::executor::aten::logical_not_outf(context_, input, out);
30   }
31 
32   template <ScalarType IN_DTYPE, ScalarType OUT_DTYPE>
test_logical_not_out()33   void test_logical_not_out() {
34     TensorFactory<IN_DTYPE> tf_in;
35     TensorFactory<OUT_DTYPE> tf_out;
36 
37     // clang-format off
38     Tensor in = tf_in.make(
39       {2, 4},
40       {
41         0, 1, 0, 1,
42         1, 0, 1, 0
43       });
44     Tensor bool_in = tf_in.make(
45       {2, 4},
46       {
47         false, true,  false, true,
48         true,  false, true,  false,
49       });
50     // clang-format on
51 
52     Tensor out = tf_out.zeros({2, 4});
53     Tensor bool_out = tf_out.zeros({2, 4});
54 
55     op_logical_not_out(in, out);
56     // clang-format off
57     EXPECT_TENSOR_CLOSE(out, tf_out.make(
58       {2, 4},
59       {
60         1, 0, 1, 0,
61         0, 1, 0, 1
62       }));
63     // clang-format on
64 
65     op_logical_not_out(bool_in, out);
66     // clang-format off
67     EXPECT_TENSOR_CLOSE(out, tf_out.make(
68       {2, 4},
69       {
70         1, 0, 1, 0,
71         0, 1, 0, 1
72       }));
73     // clang-format on
74 
75     op_logical_not_out(in, bool_out);
76     // clang-format off
77     EXPECT_TENSOR_CLOSE(bool_out, tf_out.make(
78       {2, 4},
79       {
80         true,  false, true,  false,
81         false, true,  false, true
82       }));
83     // clang-format on
84   }
85 
86   template <ScalarType OUT_DTYPE>
test_logical_not_out_float()87   void test_logical_not_out_float() {
88     TensorFactory<ScalarType::Float> tf_float;
89     TensorFactory<OUT_DTYPE> tf_out;
90 
91     Tensor in = tf_float.make(
92         {1, 4},
93         {
94             INFINITY,
95             NAN,
96             -INFINITY,
97             0,
98         });
99     Tensor out = tf_out.zeros(/*size=*/{1, 4});
100 
101     op_logical_not_out(in, out);
102     EXPECT_TENSOR_CLOSE(out, tf_out.make(/*size=*/{1, 4}, {0, 0, 0, 1}));
103   }
104 };
105 
TEST_F(OpLogicalNotOutTest, MismatchedDimensionsDies) {
  // Only the portable kernel is expected to reject the shape mismatch; the
  // ATen-backed build is skipped per the message below.
  if (torch::executor::testing::SupportedFeatures::get()->is_aten) {
    GTEST_SKIP() << "ATen kernel can handle mismatched dimensions";
  }

  TensorFactory<ScalarType::Float> float_factory;

  // 2x2 input vs. 4x1 output: same element count, different dimensions.
  Tensor input = float_factory.make({2, 2}, {0, 0, 1, 0});
  Tensor mismatched_out = float_factory.zeros(/*size=*/{4, 1});

  ET_EXPECT_KERNEL_FAILURE(
      context_, op_logical_not_out(input, mismatched_out));
}
118 
TEST_F(OpLogicalNotOutTest, AllTypePasses) {
// Two-layer macro expansion: the outer FORALL picks the input dtype, the
// inner FORALL picks the output dtype, so every real-type (input, output)
// pair is instantiated and run exactly once.
#define RUN_DTYPE_PAIR(IN_CTYPE, IN_DTYPE, OUT_CTYPE, OUT_DTYPE) \
  test_logical_not_out<ScalarType::IN_DTYPE, ScalarType::OUT_DTYPE>();

#define RUN_INPUT_DTYPE(IN_CTYPE, IN_DTYPE) \
  ET_FORALL_REAL_TYPES_WITH2(IN_CTYPE, IN_DTYPE, RUN_DTYPE_PAIR);

  ET_FORALL_REAL_TYPES(RUN_INPUT_DTYPE);
#undef RUN_INPUT_DTYPE
#undef RUN_DTYPE_PAIR
}
131 
TEST_F(OpLogicalNotOutTest, FloatSpecificTest) {
// Run the +/-Inf and NaN cases for every floating-point output dtype.
#define RUN_FLOAT_CASE(ctype, dtype) \
  test_logical_not_out_float<ScalarType::dtype>();
  ET_FORALL_FLOAT_TYPES(RUN_FLOAT_CASE);
#undef RUN_FLOAT_CASE
}
139