/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <atomic>
#include <cassert>
#include <condition_variable>
#include <list>
#include <memory>
#include <mutex>
#include <sstream>
#include <string>
#include <thread>
#include <utility>
#include <vector>

#include <grpc/grpc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpcpp/alarm.h>
#include <grpcpp/channel.h>
#include <grpcpp/client_context.h>

#include "src/core/lib/profiling/timers.h"  // for GPR_TIMER_SCOPE
#include "src/proto/grpc/testing/benchmark_service.grpc.pb.h"
#include "test/cpp/qps/client.h"
#include "test/cpp/qps/usage_timer.h"

namespace grpc {
namespace testing {

/**
 * Maintains per-RPC state: the ClientContext, response, and pacing alarm for
 * a single in-flight call.
 */
struct CallbackClientRpcContext {
  CallbackClientRpcContext(BenchmarkService::Stub* stub)
      : alarm_(nullptr), stub_(stub) {}

  ~CallbackClientRpcContext() {}

  SimpleResponse response_;
  ClientContext context_;
  std::unique_ptr<Alarm> alarm_;
  BenchmarkService::Stub* stub_;
};

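// Stub factory handed to ClientImpl, which uses it to create the
// BenchmarkService stub for each configured channel.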
static std::unique_ptr<BenchmarkService::Stub> BenchmarkStubCreator(
    const std::shared_ptr<Channel>& ch) {
  return BenchmarkService::NewStub(ch);
}

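// Common base for the callback-API benchmark clients. It owns the thread
// count, the per-RPC contexts, the load-test pacing and the shutdown
// bookkeeping; subclasses implement InitThreadFuncImpl()/ThreadFuncImpl() to
// bootstrap their share of the outstanding RPCs.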
class CallbackClient
    : public ClientImpl<BenchmarkService::Stub, SimpleRequest> {
 public:
  CallbackClient(const ClientConfig& config)
      : ClientImpl<BenchmarkService::Stub, SimpleRequest>(
            config, BenchmarkStubCreator) {
    num_threads_ = NumThreads(config);
    rpcs_done_ = 0;

    // Don't divide the fixed load among threads; the user threads only
    // bootstrap the RPCs.
    SetupLoadTest(config, 1);
    total_outstanding_rpcs_ =
        config.client_channels() * config.outstanding_rpcs_per_channel();
  }

  virtual ~CallbackClient() {}

  /**
   * The main benchmark thread waits in DestroyMultithreading. Each outstanding
   * RPC increments rpcs_done_ once its callback work has finished. The RPC
   * that brings the counter up to total_outstanding_rpcs_ also signals the
   * main thread's condition variable.
   */
  void NotifyMainThreadOfThreadCompletion() {
    std::lock_guard<std::mutex> l(shutdown_mu_);
    rpcs_done_++;
    if (rpcs_done_ == total_outstanding_rpcs_) {
      shutdown_cv_.notify_one();
    }
  }

  gpr_timespec NextRPCIssueTime() {
    std::lock_guard<std::mutex> l(next_issue_time_mu_);
    return Client::NextIssueTime(0);
  }

 protected:
  size_t num_threads_;
  size_t total_outstanding_rpcs_;
  // The mutex and condition variable below are used by the main benchmark
  // thread to wait for completion of all RPCs before shutdown.
  std::mutex shutdown_mu_;
  std::condition_variable shutdown_cv_;
  // Number of RPCs that have completed their callback work
  size_t rpcs_done_;
  // Per-RPC context objects, one per outstanding RPC
  std::vector<std::unique_ptr<CallbackClientRpcContext>> ctx_;

  virtual void InitThreadFuncImpl(size_t thread_idx) = 0;
  virtual bool ThreadFuncImpl(Thread* t, size_t thread_idx) = 0;

  void ThreadFunc(size_t thread_idx, Thread* t) override {
    InitThreadFuncImpl(thread_idx);
    ThreadFuncImpl(t, thread_idx);
  }

 private:
  std::mutex next_issue_time_mu_;  // Serializes next-issue-time computation

  int NumThreads(const ClientConfig& config) {
    int num_threads = config.async_client_threads();
    if (num_threads <= 0) {  // Use dynamic sizing
      num_threads = cores_;
      gpr_log(GPR_INFO, "Sizing callback client to %d threads", num_threads);
    }
    return num_threads;
  }

  /**
   * Wait until all outstanding callback RPCs are done.
   */
  void DestroyMultithreading() final {
    std::unique_lock<std::mutex> l(shutdown_mu_);
    while (rpcs_done_ != total_outstanding_rpcs_) {
      shutdown_cv_.wait(l);
    }
    EndThreads();
  }
};

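// Unary benchmark client. One CallbackClientRpcContext is pre-allocated per
// outstanding RPC (client_channels * outstanding_rpcs_per_channel) and the
// contexts are striped across the worker threads round-robin. A thread only
// bootstraps its share of RPCs; every subsequent RPC is chained from the
// previous one's completion callback.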
class CallbackUnaryClient final : public CallbackClient {
 public:
  CallbackUnaryClient(const ClientConfig& config) : CallbackClient(config) {
    for (int ch = 0; ch < config.client_channels(); ch++) {
      for (int i = 0; i < config.outstanding_rpcs_per_channel(); i++) {
        ctx_.emplace_back(
            new CallbackClientRpcContext(channels_[ch].get_stub()));
      }
    }
    StartThreads(num_threads_);
  }
  ~CallbackUnaryClient() {}

 protected:
  bool ThreadFuncImpl(Thread* t, size_t thread_idx) override {
    for (size_t vector_idx = thread_idx; vector_idx < total_outstanding_rpcs_;
         vector_idx += num_threads_) {
      ScheduleRpc(t, vector_idx);
    }
    return true;
  }

  void InitThreadFuncImpl(size_t /*thread_idx*/) override { return; }

 private:
  void ScheduleRpc(Thread* t, size_t vector_idx) {
    if (!closed_loop_) {
      gpr_timespec next_issue_time = NextRPCIssueTime();
      // Arm an alarm to issue the RPC at next_issue_time.
      if (ctx_[vector_idx]->alarm_ == nullptr) {
        ctx_[vector_idx]->alarm_.reset(new Alarm);
      }
      ctx_[vector_idx]->alarm_->experimental().Set(
          next_issue_time, [this, t, vector_idx](bool /*ok*/) {
            IssueUnaryCallbackRpc(t, vector_idx);
          });
    } else {
      IssueUnaryCallbackRpc(t, vector_idx);
    }
  }

  void IssueUnaryCallbackRpc(Thread* t, size_t vector_idx) {
    GPR_TIMER_SCOPE("CallbackUnaryClient::ThreadFunc", 0);
    double start = UsageTimer::Now();
    ctx_[vector_idx]->stub_->experimental_async()->UnaryCall(
        (&ctx_[vector_idx]->context_), &request_, &ctx_[vector_idx]->response_,
        [this, t, start, vector_idx](grpc::Status s) {
          // Update the histogram with data from this completed RPC.
          HistogramEntry entry;
          if (s.ok()) {
            entry.set_value((UsageTimer::Now() - start) * 1e9);
          }
          entry.set_status(s.error_code());
          t->UpdateHistogram(&entry);

          if (ThreadCompleted() || !s.ok()) {
            // Notify the main thread that this RPC chain has completed.
            NotifyMainThreadOfThreadCompletion();
          } else {
            // Allocate a fresh context for the next RPC (a ClientContext
            // cannot be reused).
            ctx_[vector_idx].reset(
                new CallbackClientRpcContext(ctx_[vector_idx]->stub_));
            // Schedule the next RPC.
            ScheduleRpc(t, vector_idx);
          }
        });
  }
};

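// Common base for the streaming benchmark clients: pre-allocates the per-RPC
// contexts and adds the messages_per_stream bookkeeping and the histogram
// helper shared by the streaming variants.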
class CallbackStreamingClient : public CallbackClient {
 public:
  CallbackStreamingClient(const ClientConfig& config)
      : CallbackClient(config),
        messages_per_stream_(config.messages_per_stream()) {
    for (int ch = 0; ch < config.client_channels(); ch++) {
      for (int i = 0; i < config.outstanding_rpcs_per_channel(); i++) {
        ctx_.emplace_back(
            new CallbackClientRpcContext(channels_[ch].get_stub()));
      }
    }
    StartThreads(num_threads_);
  }
  ~CallbackStreamingClient() {}

  void AddHistogramEntry(double start, bool ok, Thread* thread_ptr) {
    // Update the histogram with data from this completed ping-pong round.
    HistogramEntry entry;
    if (ok) {
      entry.set_value((UsageTimer::Now() - start) * 1e9);
    }
    thread_ptr->UpdateHistogram(&entry);
  }

  int messages_per_stream() { return messages_per_stream_; }

 protected:
  const int messages_per_stream_;
};

class CallbackStreamingPingPongClient : public CallbackStreamingClient {
 public:
  CallbackStreamingPingPongClient(const ClientConfig& config)
      : CallbackStreamingClient(config) {}
  ~CallbackStreamingPingPongClient() {}
};

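// Reactor driving a single ping-pong stream. StartNewRpc() starts the call
// and the first write; OnWriteDone() starts the matching read; OnReadDone()
// records the round-trip latency and either issues the next write or
// half-closes the stream; OnDone() recycles the context and schedules a new
// stream unless the benchmark is shutting down. The writes_done_started_ flag
// ensures StartWritesDone() is called at most once per stream, since both
// OnWriteDone() and OnReadDone() may attempt the half-close.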
class CallbackStreamingPingPongReactor final
    : public grpc::experimental::ClientBidiReactor<SimpleRequest,
                                                   SimpleResponse> {
 public:
  CallbackStreamingPingPongReactor(
      CallbackStreamingPingPongClient* client,
      std::unique_ptr<CallbackClientRpcContext> ctx)
      : client_(client), ctx_(std::move(ctx)), messages_issued_(0) {}

  void StartNewRpc() {
    ctx_->stub_->experimental_async()->StreamingCall(&(ctx_->context_), this);
    write_time_ = UsageTimer::Now();
    StartWrite(client_->request());
    writes_done_started_.clear();
    StartCall();
  }

  void OnWriteDone(bool ok) override {
    if (!ok) {
      gpr_log(GPR_ERROR, "Error writing RPC");
    }
    if ((!ok || client_->ThreadCompleted()) &&
        !writes_done_started_.test_and_set()) {
      StartWritesDone();
    }
    StartRead(&ctx_->response_);
  }

  void OnReadDone(bool ok) override {
    client_->AddHistogramEntry(write_time_, ok, thread_ptr_);

    if (client_->ThreadCompleted() || !ok ||
        (client_->messages_per_stream() != 0 &&
         ++messages_issued_ >= client_->messages_per_stream())) {
      if (!ok) {
        gpr_log(GPR_ERROR, "Error reading RPC");
      }
      if (!writes_done_started_.test_and_set()) {
        StartWritesDone();
      }
      return;
    }
    if (!client_->IsClosedLoop()) {
      gpr_timespec next_issue_time = client_->NextRPCIssueTime();
      // Arm an alarm to issue the next write at next_issue_time.
      ctx_->alarm_->experimental().Set(next_issue_time, [this](bool /*ok*/) {
        write_time_ = UsageTimer::Now();
        StartWrite(client_->request());
      });
    } else {
      write_time_ = UsageTimer::Now();
      StartWrite(client_->request());
    }
  }

  void OnDone(const Status& s) override {
    if (client_->ThreadCompleted() || !s.ok()) {
      client_->NotifyMainThreadOfThreadCompletion();
      return;
    }
    ctx_.reset(new CallbackClientRpcContext(ctx_->stub_));
    ScheduleRpc();
  }

  void ScheduleRpc() {
    if (!client_->IsClosedLoop()) {
      gpr_timespec next_issue_time = client_->NextRPCIssueTime();
      // Arm an alarm to start the next stream at next_issue_time.
      if (ctx_->alarm_ == nullptr) {
        ctx_->alarm_.reset(new Alarm);
      }
      ctx_->alarm_->experimental().Set(next_issue_time,
                                       [this](bool /*ok*/) { StartNewRpc(); });
    } else {
      StartNewRpc();
    }
  }

  void set_thread_ptr(Client::Thread* ptr) { thread_ptr_ = ptr; }

  CallbackStreamingPingPongClient* client_;
  std::unique_ptr<CallbackClientRpcContext> ctx_;
  std::atomic_flag writes_done_started_;
  Client::Thread* thread_ptr_;  // Needed to update histogram entries
  double write_time_;           // Track ping-pong round start time
  int messages_issued_;         // Messages issued by this stream
};

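// Ping-pong streaming client: owns one reactor per outstanding stream. The
// worker threads only kick off ScheduleRpc() on their share of the reactors;
// from then on each reactor keeps itself running out of its own callbacks.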
class CallbackStreamingPingPongClientImpl final
    : public CallbackStreamingPingPongClient {
 public:
  CallbackStreamingPingPongClientImpl(const ClientConfig& config)
      : CallbackStreamingPingPongClient(config) {
    for (size_t i = 0; i < total_outstanding_rpcs_; i++)
      reactor_.emplace_back(
          new CallbackStreamingPingPongReactor(this, std::move(ctx_[i])));
  }
  ~CallbackStreamingPingPongClientImpl() {}

  bool ThreadFuncImpl(Client::Thread* t, size_t thread_idx) override {
    for (size_t vector_idx = thread_idx; vector_idx < total_outstanding_rpcs_;
         vector_idx += num_threads_) {
      reactor_[vector_idx]->set_thread_ptr(t);
      reactor_[vector_idx]->ScheduleRpc();
    }
    return true;
  }

  void InitThreadFuncImpl(size_t /*thread_idx*/) override {}

 private:
  std::vector<std::unique_ptr<CallbackStreamingPingPongReactor>> reactor_;
};

// TODO(mhaidry): Implement streaming from client, from server, and both ways

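// Builds the callback-API client variant requested by the config. Only UNARY
// and ping-pong STREAMING are implemented here; the remaining streaming modes
// are unsupported and fail an assert (see the TODO above).
//
// Rough usage sketch, assuming the driver-side setup done by the QPS worker
// (helpers such as Mark() come from test/cpp/qps/client.h):
//
//   ClientConfig config = ...;  // supplied by the benchmark driver
//   config.set_rpc_type(UNARY);
//   std::unique_ptr<Client> client = CreateCallbackClient(config);
//   // The worker then periodically calls client->Mark(reset) for stats.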
std::unique_ptr<Client> CreateCallbackClient(const ClientConfig& config) {
  switch (config.rpc_type()) {
    case UNARY:
      return std::unique_ptr<Client>(new CallbackUnaryClient(config));
    case STREAMING:
      return std::unique_ptr<Client>(
          new CallbackStreamingPingPongClientImpl(config));
    case STREAMING_FROM_CLIENT:
    case STREAMING_FROM_SERVER:
    case STREAMING_BOTH_WAYS:
      assert(false);
      return nullptr;
    default:
      assert(false);
      return nullptr;
  }
}

}  // namespace testing
}  // namespace grpc