1 //
2 //
3 // Copyright 2015 gRPC authors.
4 //
5 // Licensed under the Apache License, Version 2.0 (the "License");
6 // you may not use this file except in compliance with the License.
7 // You may obtain a copy of the License at
8 //
9 // http://www.apache.org/licenses/LICENSE-2.0
10 //
11 // Unless required by applicable law or agreed to in writing, software
12 // distributed under the License is distributed on an "AS IS" BASIS,
13 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 // See the License for the specific language governing permissions and
15 // limitations under the License.
16 //
17 //
18
19 #include <grpc/grpc.h>
20 #include <grpc/support/time.h>
21 #include <grpcpp/channel.h>
22 #include <grpcpp/client_context.h>
23 #include <grpcpp/create_channel.h>
24 #include <grpcpp/impl/sync.h>
25 #include <grpcpp/resource_quota.h>
26 #include <grpcpp/server.h>
27 #include <grpcpp/server_builder.h>
28 #include <grpcpp/server_context.h>
29 #include <gtest/gtest.h>
30
31 #include <cinttypes>
32 #include <mutex>
33 #include <thread>
34
35 #include "absl/log/log.h"
36 #include "src/core/util/env.h"
37 #include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
38 #include "src/proto/grpc/testing/echo.grpc.pb.h"
39 #include "test/core/test_util/port.h"
40 #include "test/core/test_util/test_config.h"
41
// Stress-test tunables.  constexpr (rather than const) makes the
// compile-time nature of these knobs explicit; both have internal linkage.
constexpr int kNumThreads = 10;             // Sync client threads per test
constexpr int kNumAsyncSendThreads = 2;     // Threads issuing async RPCs
constexpr int kNumAsyncReceiveThreads = 5;  // Threads draining the client CQ
constexpr int kNumAsyncServerThreads = 5;   // Threads polling the server CQ
constexpr int kNumRpcs = 1000;              // Number of RPCs per thread
47
48 namespace grpc {
49 namespace testing {
50
51 class TestServiceImpl : public grpc::testing::EchoTestService::Service {
52 public:
TestServiceImpl()53 TestServiceImpl() {}
54
Echo(ServerContext *,const EchoRequest * request,EchoResponse * response)55 Status Echo(ServerContext* /*context*/, const EchoRequest* request,
56 EchoResponse* response) override {
57 response->set_message(request->message());
58 return Status::OK;
59 }
60 };
61
62 template <class Service>
63 class CommonStressTest {
64 public:
CommonStressTest()65 CommonStressTest() : kMaxMessageSize_(8192) {
66 #if TARGET_OS_IPHONE
67 // Workaround Apple CFStream bug
68 grpc_core::SetEnv("grpc_cfstream", "0");
69 #endif
70 }
~CommonStressTest()71 virtual ~CommonStressTest() {}
72 virtual void SetUp() = 0;
73 virtual void TearDown() = 0;
74 virtual void ResetStub() = 0;
75 virtual bool AllowExhaustion() = 0;
GetStub()76 grpc::testing::EchoTestService::Stub* GetStub() { return stub_.get(); }
77
78 protected:
79 std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
80 std::unique_ptr<Server> server_;
81
82 virtual void SetUpStart(ServerBuilder* builder, Service* service) = 0;
SetUpStartCommon(ServerBuilder * builder,Service * service)83 void SetUpStartCommon(ServerBuilder* builder, Service* service) {
84 builder->RegisterService(service);
85 builder->SetMaxMessageSize(
86 kMaxMessageSize_); // For testing max message size.
87 }
SetUpEnd(ServerBuilder * builder)88 void SetUpEnd(ServerBuilder* builder) { server_ = builder->BuildAndStart(); }
TearDownStart()89 void TearDownStart() { server_->Shutdown(); }
TearDownEnd()90 void TearDownEnd() {}
91
92 private:
93 const int kMaxMessageSize_;
94 };
95
96 template <class Service>
97 class CommonStressTestInsecure : public CommonStressTest<Service> {
98 public:
ResetStub()99 void ResetStub() override {
100 std::shared_ptr<Channel> channel = grpc::CreateChannel(
101 server_address_.str(), InsecureChannelCredentials());
102 this->stub_ = grpc::testing::EchoTestService::NewStub(channel);
103 }
AllowExhaustion()104 bool AllowExhaustion() override { return false; }
105
106 protected:
SetUpStart(ServerBuilder * builder,Service * service)107 void SetUpStart(ServerBuilder* builder, Service* service) override {
108 int port = grpc_pick_unused_port_or_die();
109 this->server_address_ << "localhost:" << port;
110 // Setup server
111 builder->AddListeningPort(server_address_.str(),
112 InsecureServerCredentials());
113 this->SetUpStartCommon(builder, service);
114 }
115
116 private:
117 std::ostringstream server_address_;
118 };
119
120 template <class Service, bool allow_resource_exhaustion>
121 class CommonStressTestInproc : public CommonStressTest<Service> {
122 public:
ResetStub()123 void ResetStub() override {
124 ChannelArguments args;
125 std::shared_ptr<Channel> channel = this->server_->InProcessChannel(args);
126 this->stub_ = grpc::testing::EchoTestService::NewStub(channel);
127 }
AllowExhaustion()128 bool AllowExhaustion() override { return allow_resource_exhaustion; }
129
130 protected:
SetUpStart(ServerBuilder * builder,Service * service)131 void SetUpStart(ServerBuilder* builder, Service* service) override {
132 this->SetUpStartCommon(builder, service);
133 }
134 };
135
// Runs the synchronous server implementation on top of BaseClass's
// transport/credentials policy (see CommonStressTestInsecure/Inproc).
template <class BaseClass>
class CommonStressTestSyncServer : public BaseClass {
 public:
  void SetUp() override {
    ServerBuilder builder;
    this->SetUpStart(&builder, &service_);
    this->SetUpEnd(&builder);
  }
  void TearDown() override {
    this->TearDownStart();
    this->TearDownEnd();
  }

 private:
  TestServiceImpl service_;
};
152
// Sync-server variant whose resource quota is capped at 4 threads —
// presumably to provoke RESOURCE_EXHAUSTED under heavy client load
// (pairs with AllowExhaustion() == true policies).
template <class BaseClass>
class CommonStressTestSyncServerLowThreadCount : public BaseClass {
 public:
  void SetUp() override {
    ServerBuilder builder;
    ResourceQuota quota;
    this->SetUpStart(&builder, &service_);
    // Well below kNumThreads, so the server cannot serve all clients at once.
    quota.SetMaxThreads(4);
    builder.SetResourceQuota(quota);
    this->SetUpEnd(&builder);
  }
  void TearDown() override {
    this->TearDownStart();
    this->TearDownEnd();
  }

 private:
  TestServiceImpl service_;
};
172
// Runs the async (completion-queue based) server implementation on top of
// BaseClass's transport policy.  kNumAsyncServerThreads threads poll one
// shared server completion queue, with 100 pre-posted Echo calls per thread.
// Each Context slot's index doubles as the completion-queue tag.
template <class BaseClass>
class CommonStressTestAsyncServer : public BaseClass {
 public:
  CommonStressTestAsyncServer() : contexts_(kNumAsyncServerThreads * 100) {}
  void SetUp() override {
    shutting_down_ = false;
    ServerBuilder builder;
    this->SetUpStart(&builder, &service_);
    cq_ = builder.AddCompletionQueue();
    this->SetUpEnd(&builder);
    // Pre-post one RequestEcho per context slot before the pollers start.
    for (int i = 0; i < kNumAsyncServerThreads * 100; i++) {
      RefreshContext(i);
    }
    for (int i = 0; i < kNumAsyncServerThreads; i++) {
      server_threads_.emplace_back(&CommonStressTestAsyncServer::ProcessRpcs,
                                   this);
    }
  }
  void TearDown() override {
    {
      // Hold mu_ so no concurrent RefreshContext() can re-post a request
      // while the server and the completion queue are shutting down.
      grpc::internal::MutexLock l(&mu_);
      this->TearDownStart();
      shutting_down_ = true;
      cq_->Shutdown();
    }

    // Pollers exit once cq_->Next() starts returning false.
    for (int i = 0; i < kNumAsyncServerThreads; i++) {
      server_threads_[i].join();
    }

    // Drain events that were already in flight when the queue shut down.
    void* ignored_tag;
    bool ignored_ok;
    while (cq_->Next(&ignored_tag, &ignored_ok)) {
    }
    this->TearDownEnd();
  }

 private:
  // Poller thread body: two-phase state machine per context slot.
  //   READY -> a new call arrived: send the echo response, mark DONE.
  //   DONE  -> the Finish() completed: recycle the slot for the next call.
  void ProcessRpcs() {
    void* tag;
    bool ok;
    while (cq_->Next(&tag, &ok)) {
      if (ok) {
        // The tag is the slot index (see RefreshContext).
        int i = static_cast<int>(reinterpret_cast<intptr_t>(tag));
        switch (contexts_[i].state) {
          case Context::READY: {
            contexts_[i].state = Context::DONE;
            EchoResponse send_response;
            send_response.set_message(contexts_[i].recv_request.message());
            contexts_[i].response_writer->Finish(send_response, Status::OK,
                                                 tag);
            break;
          }
          case Context::DONE:
            RefreshContext(i);
            break;
        }
      }
    }
  }
  // Re-arms slot i with fresh call state and re-posts RequestEcho, unless
  // the fixture is shutting down (in which case the slot simply retires).
  void RefreshContext(int i) {
    grpc::internal::MutexLock l(&mu_);
    if (!shutting_down_) {
      contexts_[i].state = Context::READY;
      contexts_[i].srv_ctx.reset(new ServerContext);
      contexts_[i].response_writer.reset(
          new grpc::ServerAsyncResponseWriter<EchoResponse>(
              contexts_[i].srv_ctx.get()));
      service_.RequestEcho(contexts_[i].srv_ctx.get(),
                           &contexts_[i].recv_request,
                           contexts_[i].response_writer.get(), cq_.get(),
                           cq_.get(), reinterpret_cast<void*>(i));
    }
  }
  // One pre-allocated slot per potentially in-flight call, indexed by tag.
  struct Context {
    std::unique_ptr<ServerContext> srv_ctx;
    std::unique_ptr<grpc::ServerAsyncResponseWriter<EchoResponse>>
        response_writer;
    EchoRequest recv_request;
    // NOTE(review): state is written by pollers without mu_; this appears to
    // rely on at most one CQ event being outstanding per slot — confirm.
    enum { READY, DONE } state;
  };
  std::vector<Context> contexts_;
  grpc::testing::EchoTestService::AsyncService service_;
  std::unique_ptr<ServerCompletionQueue> cq_;
  bool shutting_down_;  // guarded by mu_
  grpc::internal::Mutex mu_;
  std::vector<std::thread> server_threads_;
};
261
262 template <class Common>
263 class End2endTest : public ::testing::Test {
264 protected:
End2endTest()265 End2endTest() {}
SetUp()266 void SetUp() override { common_.SetUp(); }
TearDown()267 void TearDown() override { common_.TearDown(); }
ResetStub()268 void ResetStub() { common_.ResetStub(); }
269
270 Common common_;
271 };
272
SendRpc(grpc::testing::EchoTestService::Stub * stub,int num_rpcs,bool allow_exhaustion,gpr_atm * errors)273 static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs,
274 bool allow_exhaustion, gpr_atm* errors) {
275 EchoRequest request;
276 EchoResponse response;
277 request.set_message("Hello");
278
279 for (int i = 0; i < num_rpcs; ++i) {
280 ClientContext context;
281 Status s = stub->Echo(&context, request, &response);
282 EXPECT_TRUE(s.ok() || (allow_exhaustion &&
283 s.error_code() == StatusCode::RESOURCE_EXHAUSTED));
284 if (!s.ok()) {
285 if (!(allow_exhaustion &&
286 s.error_code() == StatusCode::RESOURCE_EXHAUSTED)) {
287 LOG(ERROR) << "RPC error: " << s.error_code() << ": "
288 << s.error_message();
289 }
290 gpr_atm_no_barrier_fetch_add(errors, gpr_atm{1});
291 } else {
292 EXPECT_EQ(response.message(), request.message());
293 }
294 }
295 }
296
297 typedef ::testing::Types<
298 CommonStressTestSyncServer<CommonStressTestInsecure<TestServiceImpl>>,
299 CommonStressTestAsyncServer<
300 CommonStressTestInsecure<grpc::testing::EchoTestService::AsyncService>>>
301 CommonTypes;
302 TYPED_TEST_SUITE(End2endTest, CommonTypes);
TYPED_TEST(End2endTest,ThreadStress)303 TYPED_TEST(End2endTest, ThreadStress) {
304 this->common_.ResetStub();
305 std::vector<std::thread> threads;
306 gpr_atm errors;
307 gpr_atm_rel_store(&errors, gpr_atm{0});
308 threads.reserve(kNumThreads);
309 for (int i = 0; i < kNumThreads; ++i) {
310 threads.emplace_back(SendRpc, this->common_.GetStub(), kNumRpcs,
311 this->common_.AllowExhaustion(), &errors);
312 }
313 for (int i = 0; i < kNumThreads; ++i) {
314 threads[i].join();
315 }
316 uint64_t error_cnt = static_cast<uint64_t>(gpr_atm_no_barrier_load(&errors));
317 if (error_cnt != 0) {
318 LOG(INFO) << "RPC error count: " << error_cnt;
319 }
320 // If this test allows resource exhaustion, expect that it actually sees some
321 if (this->common_.AllowExhaustion()) {
322 EXPECT_GT(error_cnt, 0);
323 }
324 }
325
// Fixture for the async-client stress test.  The client completion queue
// (cq_) is shared between sender threads (AsyncSendRpc) and receiver threads
// (AsyncCompleteRpc); rpcs_outstanding_ counts in-flight calls under mu_ so
// Wait() can tell when everything has drained.
template <class Common>
class AsyncClientEnd2endTest : public ::testing::Test {
 protected:
  AsyncClientEnd2endTest() : rpcs_outstanding_(0) {}

  void SetUp() override { common_.SetUp(); }
  void TearDown() override {
    // Drain whatever is left on the queue after Wait() shut it down, then
    // tear down the server.
    void* ignored_tag;
    bool ignored_ok;
    while (cq_.Next(&ignored_tag, &ignored_ok)) {
    }
    common_.TearDown();
  }

  // Blocks until every outstanding RPC has completed, then shuts down the
  // completion queue so the receiver threads' Next() loops terminate.
  void Wait() {
    grpc::internal::MutexLock l(&mu_);
    while (rpcs_outstanding_ != 0) {
      cv_.Wait(&mu_);
    }

    cq_.Shutdown();
  }

  // Per-call state.  Allocated by the sender; the pointer is used as the CQ
  // tag and deleted by whichever receiver thread dequeues the completion.
  struct AsyncClientCall {
    EchoResponse response;
    ClientContext context;
    Status status;
    std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader;
  };

  // Sender thread body: fires num_rpcs async Echo calls, bumping the
  // outstanding count for each one.
  void AsyncSendRpc(int num_rpcs) {
    for (int i = 0; i < num_rpcs; ++i) {
      AsyncClientCall* call = new AsyncClientCall;
      EchoRequest request;
      request.set_message("Hello: " + std::to_string(i));
      call->response_reader =
          common_.GetStub()->AsyncEcho(&call->context, request, &cq_);
      call->response_reader->Finish(&call->response, &call->status, call);

      grpc::internal::MutexLock l(&mu_);
      rpcs_outstanding_++;
    }
  }

  // Receiver thread body: drains completions until the queue is shut down
  // and fully drained, freeing each call and signalling Wait() when the
  // outstanding count reaches zero.
  void AsyncCompleteRpc() {
    while (true) {
      void* got_tag;
      bool ok = false;
      if (!cq_.Next(&got_tag, &ok)) break;  // queue shut down and drained
      AsyncClientCall* call = static_cast<AsyncClientCall*>(got_tag);
      if (!ok) {
        VLOG(2) << "Error: " << call->status.error_code();
      }
      delete call;

      bool notify;
      {
        grpc::internal::MutexLock l(&mu_);
        rpcs_outstanding_--;
        notify = (rpcs_outstanding_ == 0);
      }
      // Signal outside the lock; Wait() is the only waiter.
      if (notify) {
        cv_.Signal();
      }
    }
  }

  Common common_;
  CompletionQueue cq_;
  grpc::internal::Mutex mu_;
  grpc::internal::CondVar cv_;
  int rpcs_outstanding_;  // guarded by mu_
};
399
TYPED_TEST_SUITE(AsyncClientEnd2endTest, CommonTypes);
TYPED_TEST(AsyncClientEnd2endTest, ThreadStress) {
  this->common_.ResetStub();

  // Start the completion-drain threads first so the queue is being polled
  // while the senders enqueue RPCs.
  std::vector<std::thread> completion_threads;
  for (int i = 0; i < kNumAsyncReceiveThreads; ++i) {
    completion_threads.emplace_back(
        &AsyncClientEnd2endTest_ThreadStress_Test<TypeParam>::AsyncCompleteRpc,
        this);
  }
  std::vector<std::thread> send_threads;
  for (int i = 0; i < kNumAsyncSendThreads; ++i) {
    send_threads.emplace_back(
        &AsyncClientEnd2endTest_ThreadStress_Test<TypeParam>::AsyncSendRpc,
        this, kNumRpcs);
  }
  for (auto& sender : send_threads) {
    sender.join();
  }

  // All RPCs are in flight; wait for them to drain, then reap the pollers.
  this->Wait();
  for (auto& poller : completion_threads) {
    poller.join();
  }
}
423
424 } // namespace testing
425 } // namespace grpc
426
// Test entry point: set up the gRPC test environment (flags, tracing) and
// GoogleTest, then run every registered test.
int main(int argc, char** argv) {
  grpc::testing::TestEnvironment env(&argc, argv);
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
432