/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/portable/NativeFunctions.h> // Declares the operator
#include <executorch/kernels/test/TestUtil.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>

#include <gtest/gtest.h>

using namespace ::testing;
using exec_aten::ScalarType;
using exec_aten::string_view;
using exec_aten::Tensor;
using torch::executor::testing::TensorFactory;

// Note: This file is used for testing op_gelu cases that are *portable kernel
// specific*. If your test case is generic and should be tested on all kernels,
// add it to executorch/kernels/test/op_gelu_test.cpp instead.

Tensor& op_gelu_out(const Tensor& self, string_view approximate, Tensor& out) {
  executorch::runtime::KernelRuntimeContext context{};
  return torch::executor::native::gelu_out(context, self, approximate, out);
}

TEST(OpGeluKernelTest, HandleInfAndNanInput) {
  TensorFactory<ScalarType::Float> tf;

  const std::vector<int32_t> sizes = {3, 2};

  Tensor in = tf.make(
      sizes,
      /*data=*/
      {-0.4775,
       -std::numeric_limits<float>::infinity(),
       -0.3984,
       NAN,
       std::numeric_limits<float>::infinity(),
       -0.4848});

  // Destination for the gelu.
  Tensor out = tf.zeros(sizes);

  // Run full gelu.
  op_gelu_out(in, "none", out);

  // Check that it matches the expected output.
  EXPECT_TENSOR_CLOSE(
      out,
      tf.make(
          sizes,
          /*data=*/
          {-0.15113, 0.0, -0.137515, NAN, INFINITY, -0.152183}));

  // Run tanh gelu approximation.
  op_gelu_out(in, "tanh", out);

  // Check that it matches the expected output.
  EXPECT_TENSOR_CLOSE(
      out,
      tf.make(
          sizes,
          /*data=*/
          {-0.151145, 0.0, -0.137522, NAN, INFINITY, -0.152199}));
}
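
// The following test is an illustrative sketch only, not part of the upstream
// file: it shows the shape of an additional case built from the same helpers
// defined above (TensorFactory, op_gelu_out, EXPECT_TENSOR_CLOSE). Its name
// and contents are hypothetical, and a generic check like this would normally
// belong in the shared op_gelu_test.cpp per the note above.
TEST(OpGeluKernelTest, ZeroInputProducesZeroOutput) {
  TensorFactory<ScalarType::Float> tf;

  const std::vector<int32_t> sizes = {2, 2};

  // All-zero input; gelu(0) == 0 for both the exact and tanh formulations.
  Tensor in = tf.zeros(sizes);
  Tensor out = tf.zeros(sizes);

  // Exact gelu.
  op_gelu_out(in, "none", out);
  EXPECT_TENSOR_CLOSE(out, tf.zeros(sizes));

  // Tanh approximation.
  op_gelu_out(in, "tanh", out);
  EXPECT_TENSOR_CLOSE(out, tf.zeros(sizes));
}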