#include <gtest/gtest.h>

#include <functional>
#include <mutex>
#include <string>
#include <vector>

#include "test/cpp/jit/test_utils.h"
#include "torch/csrc/jit/runtime/graph_executor.h"
#include "torch/jit.h"
#include "torch/script.h"
#include "torch/torch.h"
8 
namespace torch {
namespace jit {

// Runs an LSTM graph through GraphExecutor on CUDA and checks that the
// executor's outputs match a reference eager-mode LSTM computation.
TEST(GraphExecutorTest, Basic_CUDA) {
  constexpr int batch_size = 4;
  constexpr int input_size = 256;
  const int hidden_size = 2 * input_size;

  // Random activations and weights on the GPU; t_def lays the weight
  // matrices out the way the generated LSTM graph expects them.
  const auto input = at::randn({batch_size, input_size}, at::kCUDA);
  const auto hx = at::randn({batch_size, hidden_size}, at::kCUDA);
  const auto cx = at::randn({batch_size, hidden_size}, at::kCUDA);
  const auto w_ih = t_def(at::randn({4 * hidden_size, input_size}, at::kCUDA));
  const auto w_hh = t_def(at::randn({4 * hidden_size, hidden_size}, at::kCUDA));

  GraphExecutor executor(build_lstm(), "");
  auto stack = createStack({input, hx, cx, w_ih, w_hh});
  executor.run(stack);

  // The LSTM graph leaves exactly two values on the stack: the new hidden
  // state and the new cell state.
  ASSERT_EQ(stack.size(), 2);
  auto [expected_h, expected_c] = lstm(input, hx, cx, w_ih, w_hh);
  ASSERT_TRUE(almostEqual(stack[0].toTensor(), expected_h));
  ASSERT_TRUE(almostEqual(stack[1].toTensor(), expected_c));
}
33 
// Verifies that GraphExecutor::runAsync routes asynchronous subtasks through
// a caller-provided task launcher instead of the default executor: after the
// run completes, the launcher must have been invoked at least once.
TEST(GraphExecutorTest, runAsync_executor) {
  /*
  TODO: there are some problem with C++ parsing script program involving
  fork. Use the test module below for now.
  issue about this: github.com/pytorch/pytorch/issues/46368
  The test module file is generated by following:
    class DemoModule(torch.nn.Module):
      def forward(self):
        r1 = torch.jit.fork(torch.mm, torch.rand(100,100),torch.rand(100,100))
        r2 = torch.jit.fork(torch.mm, torch.rand(100,100),torch.rand(100,100))
        return r1.wait() + r2.wait()
  demo = DemoModule()
  torch.jit.save(torch.jit.script(demo), 'test_interpreter_async.pt')
  */
  // Load the pre-scripted module that lives next to this source file.
  std::string filePath(__FILE__);
  auto testModelFile = filePath.substr(0, filePath.find_last_of("/\\") + 1);
  testModelFile.append("test_interpreter_async.pt");
  auto module = load(testModelFile);
  auto graph = module.get_method("forward").graph();
  GraphExecutor graphExecutor(graph, "");
  auto asyncCounter = 0;
  std::mutex mtx;
  // A dummy executor which still defers to at::launch, but counts every
  // dispatched task so we can observe that the custom launcher was used.
  // The launcher may be invoked from multiple interpreter threads, so the
  // counter increment is guarded; std::lock_guard keeps the lock/unlock
  // pairing exception-safe (the original paired mtx.lock()/mtx.unlock()
  // by hand, which would leak the lock if ++asyncCounter ever threw).
  auto launcher = [&](std::function<void()> f) {
    {
      std::lock_guard<std::mutex> guard(mtx);
      ++asyncCounter;
    }
    at::launch(std::move(f));
  };
  std::vector<IValue> stack;
  // NOLINTNEXTLINE(modernize-use-emplace)
  stack.push_back(module._ivalue());
  graphExecutor.runAsync(stack, launcher)->wait();
  // wait() has returned, so all launcher invocations happened-before this
  // read; the forked subtasks must have gone through our launcher.
  ASSERT_TRUE(asyncCounter > 0);
}
69 
} // namespace jit
} // namespace torch