/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <cinttypes>
#include <deque>
#include <list>
#include <thread>
#include <unordered_map>
#include <vector>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpcpp/channel.h>
#include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h>

#include "src/core/lib/gpr/env.h"
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/profiling/timers.h"
#include "src/proto/grpc/testing/worker_service.grpc.pb.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#include "test/cpp/qps/client.h"
#include "test/cpp/qps/driver.h"
#include "test/cpp/qps/histogram.h"
#include "test/cpp/qps/qps_worker.h"
#include "test/cpp/qps/stats.h"
#include "test/cpp/util/test_credentials_provider.h"

using std::deque;
using std::list;
using std::thread;
using std::unique_ptr;
using std::vector;

namespace grpc {
namespace testing {
static std::string get_host(const std::string& worker) {
  absl::string_view host;
  absl::string_view port;
  grpc_core::SplitHostPort(worker.c_str(), &host, &port);
  return std::string(host.data(), host.size());
}

static deque<string> get_workers(const string& env_name) {
  deque<string> out;
  char* env = gpr_getenv(env_name.c_str());
  if (!env) {
    env = gpr_strdup("");
  }
  char* p = env;
  if (strlen(env) != 0) {
    for (;;) {
      char* comma = strchr(p, ',');
      if (comma) {
        out.emplace_back(p, comma);
        p = comma + 1;
      } else {
        out.emplace_back(p);
        break;
      }
    }
  }
  if (out.size() == 0) {
    gpr_log(GPR_ERROR,
            "Environment variable \"%s\" does not contain a list of QPS "
            "workers to use. Set it to a comma-separated list of "
            "hostname:port pairs, starting with hosts that should act as "
            "servers. E.g. export "
            "%s=\"serverhost1:1234,clienthost1:1234,clienthost2:1234\"",
            env_name.c_str(), env_name.c_str());
  }
  gpr_free(env);
  return out;
}
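// Illustrative example (not from the source): with
//   QPS_WORKERS="srv1:10000,cli1:10010,cli2:10010"
// get_workers() returns {"srv1:10000", "cli1:10010", "cli2:10010"} in order,
// so the hosts listed first are the ones assigned the server role below.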

std::string GetCredType(
    const std::string& worker_addr,
    const std::map<std::string, std::string>& per_worker_credential_types,
    const std::string& credential_type) {
  auto it = per_worker_credential_types.find(worker_addr);
  if (it != per_worker_credential_types.end()) {
    return it->second;
  }
  return credential_type;
}
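// Illustrative example (hypothetical values): with
//   per_worker_credential_types = {{"srv1:10000", "alts"}}
// and credential_type = "insecure", worker "srv1:10000" resolves to "alts"
// while every other worker falls back to the default "insecure".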

// helpers for postprocess_scenario_result
static double WallTime(const ClientStats& s) { return s.time_elapsed(); }
static double SystemTime(const ClientStats& s) { return s.time_system(); }
static double UserTime(const ClientStats& s) { return s.time_user(); }
static double CliPollCount(const ClientStats& s) { return s.cq_poll_count(); }
static double SvrPollCount(const ServerStats& s) { return s.cq_poll_count(); }
static double ServerWallTime(const ServerStats& s) { return s.time_elapsed(); }
static double ServerSystemTime(const ServerStats& s) { return s.time_system(); }
static double ServerUserTime(const ServerStats& s) { return s.time_user(); }
static double ServerTotalCpuTime(const ServerStats& s) {
  return s.total_cpu_time();
}
static double ServerIdleCpuTime(const ServerStats& s) {
  return s.idle_cpu_time();
}
static int Cores(int n) { return n; }

static bool IsSuccess(const Status& s) {
  if (s.ok()) return true;
  // Since we shut down servers and clients at the same time, both can
  // observe cancellation.  Thus, we consider CANCELLED a good status.
  if (static_cast<StatusCode>(s.error_code()) == StatusCode::CANCELLED) {
    return true;
  }
  // Since we shut down servers and clients at the same time, the server can
  // close the socket before the client attempts to do so, and vice versa.
  // Thus, receiving a "Socket closed" error is fine.
  if (s.error_message() == "Socket closed") return true;
  return false;
}

// Postprocess ScenarioResult and populate result summary.
static void postprocess_scenario_result(ScenarioResult* result) {
  // Get latencies from ScenarioResult latencies histogram and populate to
  // result summary.
  Histogram histogram;
  histogram.MergeProto(result->latencies());
  result->mutable_summary()->set_latency_50(histogram.Percentile(50));
  result->mutable_summary()->set_latency_90(histogram.Percentile(90));
  result->mutable_summary()->set_latency_95(histogram.Percentile(95));
  result->mutable_summary()->set_latency_99(histogram.Percentile(99));
  result->mutable_summary()->set_latency_999(histogram.Percentile(99.9));

  // Calculate qps and cpu load for each client and then aggregate the
  // results across all clients.
  double qps = 0;
  double client_system_cpu_load = 0, client_user_cpu_load = 0;
  for (int i = 0; i < result->client_stats_size(); i++) {
    auto client_stat = result->client_stats(i);
    qps += client_stat.latencies().count() / client_stat.time_elapsed();
    client_system_cpu_load +=
        client_stat.time_system() / client_stat.time_elapsed();
    client_user_cpu_load +=
        client_stat.time_user() / client_stat.time_elapsed();
  }
  // Calculate cpu load for each server and then aggregate the results across
  // all servers.
  double server_system_cpu_load = 0, server_user_cpu_load = 0;
  for (int i = 0; i < result->server_stats_size(); i++) {
    auto server_stat = result->server_stats(i);
    server_system_cpu_load +=
        server_stat.time_system() / server_stat.time_elapsed();
    server_user_cpu_load +=
        server_stat.time_user() / server_stat.time_elapsed();
  }
  result->mutable_summary()->set_qps(qps);
  // Populate the percentage of cpu load in the result summary.
  result->mutable_summary()->set_server_system_time(100 *
                                                    server_system_cpu_load);
  result->mutable_summary()->set_server_user_time(100 * server_user_cpu_load);
  result->mutable_summary()->set_client_system_time(100 *
                                                    client_system_cpu_load);
  result->mutable_summary()->set_client_user_time(100 * client_user_cpu_load);
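  // Note that loads are summed across clients (and servers) before the
  // conversion to a percentage, so (illustrative arithmetic) two clients
  // that each spend 40% of wall time in system calls yield a
  // client_system_time of 80.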

  // On non-Linux platforms, get_cpu_usage() is not implemented, so
  // ServerTotalCpuTime and ServerIdleCpuTime are both 0.
  if (average(result->server_stats(), ServerTotalCpuTime) == 0) {
    result->mutable_summary()->set_server_cpu_usage(0);
  } else {
    auto server_cpu_usage =
        100 - 100 * average(result->server_stats(), ServerIdleCpuTime) /
                  average(result->server_stats(), ServerTotalCpuTime);
    result->mutable_summary()->set_server_cpu_usage(server_cpu_usage);
  }
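  // Illustrative arithmetic: an average idle time of 20s against an average
  // total CPU time of 100s gives server_cpu_usage = 100 - 100 * 20 / 100 =
  // 80 (percent).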

  // Calculate and populate successful and failed requests per second in the
  // result summary.
  auto time_estimate = average(result->client_stats(), WallTime);
  if (result->request_results_size() > 0) {
    int64_t successes = 0;
    int64_t failures = 0;
    for (int i = 0; i < result->request_results_size(); i++) {
      const RequestResultCount& rrc = result->request_results(i);
      if (rrc.status_code() == 0) {
        successes += rrc.count();
      } else {
        failures += rrc.count();
      }
    }
    result->mutable_summary()->set_successful_requests_per_second(
        successes / time_estimate);
    result->mutable_summary()->set_failed_requests_per_second(failures /
                                                              time_estimate);
  }

  // Fill in data for other metrics required in result summary
  auto qps_per_server_core = qps / sum(result->server_cores(), Cores);
  result->mutable_summary()->set_qps_per_server_core(qps_per_server_core);
  result->mutable_summary()->set_client_polls_per_request(
      sum(result->client_stats(), CliPollCount) / histogram.Count());
  result->mutable_summary()->set_server_polls_per_request(
      sum(result->server_stats(), SvrPollCount) / histogram.Count());

  auto server_queries_per_cpu_sec =
      histogram.Count() / (sum(result->server_stats(), ServerSystemTime) +
                           sum(result->server_stats(), ServerUserTime));
  auto client_queries_per_cpu_sec =
      histogram.Count() / (sum(result->client_stats(), SystemTime) +
                           sum(result->client_stats(), UserTime));

  result->mutable_summary()->set_server_queries_per_cpu_sec(
      server_queries_per_cpu_sec);
  result->mutable_summary()->set_client_queries_per_cpu_sec(
      client_queries_per_cpu_sec);
}

struct ClientData {
  unique_ptr<WorkerService::Stub> stub;
  unique_ptr<ClientReaderWriter<ClientArgs, ClientStatus>> stream;
};

struct ServerData {
  unique_ptr<WorkerService::Stub> stub;
  unique_ptr<ClientReaderWriter<ServerArgs, ServerStatus>> stream;
};
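// The streams above follow the driver <-> worker conversation that
// RunScenario orchestrates below:
//   1. Write the ClientArgs/ServerArgs setup message and read back an
//      initial status (for servers, this carries the listening port).
//   2. Write a mark with reset=true to open a measurement window; each
//      status read back reports stats accumulated since the previous mark.
//   3. After the benchmark window, write a final mark, call WritesDone(),
//      read the final stats (the last message on the stream), and collect
//      the stream's status via Finish().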

static void FinishClients(const std::vector<ClientData>& clients,
                          const ClientArgs& client_mark) {
  gpr_log(GPR_INFO, "Finishing clients");
  for (size_t i = 0, i_end = clients.size(); i < i_end; i++) {
    auto client = &clients[i];
    if (!client->stream->Write(client_mark)) {
      gpr_log(GPR_ERROR, "Couldn't write mark to client %zu", i);
      GPR_ASSERT(false);
    }
    if (!client->stream->WritesDone()) {
      gpr_log(GPR_ERROR, "Failed WritesDone for client %zu", i);
      GPR_ASSERT(false);
    }
  }
}

static void ReceiveFinalStatusFromClients(
    const std::vector<ClientData>& clients, Histogram& merged_latencies,
    std::unordered_map<int, int64_t>& merged_statuses, ScenarioResult& result) {
  gpr_log(GPR_INFO, "Receiving final status from clients");
  ClientStatus client_status;
  for (size_t i = 0, i_end = clients.size(); i < i_end; i++) {
    auto client = &clients[i];
    // Read the client's final status.
    if (client->stream->Read(&client_status)) {
      gpr_log(GPR_INFO, "Received final status from client %zu", i);
      const auto& stats = client_status.stats();
      merged_latencies.MergeProto(stats.latencies());
      // Use a distinct index so the outer loop variable is not shadowed.
      for (int j = 0; j < stats.request_results_size(); j++) {
        merged_statuses[stats.request_results(j).status_code()] +=
            stats.request_results(j).count();
      }
      result.add_client_stats()->CopyFrom(stats);
      // That final status should be the last message on the client stream.
      GPR_ASSERT(!client->stream->Read(&client_status));
    } else {
      gpr_log(GPR_ERROR, "Couldn't get final status from client %zu", i);
      GPR_ASSERT(false);
    }
  }
}

static void ShutdownClients(const std::vector<ClientData>& clients,
                            ScenarioResult& result) {
  gpr_log(GPR_INFO, "Shutdown clients");
  for (size_t i = 0, i_end = clients.size(); i < i_end; i++) {
    auto client = &clients[i];
    Status s = client->stream->Finish();
    // Since we shut down servers and clients at the same time, clients can
    // observe cancellation.  Thus, we consider both OK and CANCELLED good
    // statuses.
    const bool success = IsSuccess(s);
    result.add_client_success(success);
    if (!success) {
      gpr_log(GPR_ERROR, "Client %zu had an error %s", i,
              s.error_message().c_str());
      GPR_ASSERT(false);
    }
  }
}

static void FinishServers(const std::vector<ServerData>& servers,
                          const ServerArgs& server_mark) {
  gpr_log(GPR_INFO, "Finishing servers");
  for (size_t i = 0, i_end = servers.size(); i < i_end; i++) {
    auto server = &servers[i];
    if (!server->stream->Write(server_mark)) {
      gpr_log(GPR_ERROR, "Couldn't write mark to server %zu", i);
      GPR_ASSERT(false);
    }
    if (!server->stream->WritesDone()) {
      gpr_log(GPR_ERROR, "Failed WritesDone for server %zu", i);
      GPR_ASSERT(false);
    }
  }
}

static void ReceiveFinalStatusFromServer(const std::vector<ServerData>& servers,
                                         ScenarioResult& result) {
  gpr_log(GPR_INFO, "Receiving final status from servers");
  ServerStatus server_status;
  for (size_t i = 0, i_end = servers.size(); i < i_end; i++) {
    auto server = &servers[i];
    // Read the server final status
    if (server->stream->Read(&server_status)) {
      gpr_log(GPR_INFO, "Received final status from server %zu", i);
      result.add_server_stats()->CopyFrom(server_status.stats());
      result.add_server_cores(server_status.cores());
      // That final status should be the last message on the server stream
      GPR_ASSERT(!server->stream->Read(&server_status));
    } else {
      gpr_log(GPR_ERROR, "Couldn't get final status from server %zu", i);
      GPR_ASSERT(false);
    }
  }
}

static void ShutdownServers(const std::vector<ServerData>& servers,
                            ScenarioResult& result) {
  gpr_log(GPR_INFO, "Shutdown servers");
  for (size_t i = 0, i_end = servers.size(); i < i_end; i++) {
    auto server = &servers[i];
    Status s = server->stream->Finish();
    // Since we shut down servers and clients at the same time, servers can
    // observe cancellation.  Thus, we consider both OK and CANCELLED good
    // statuses.
    const bool success = IsSuccess(s);
    result.add_server_success(success);
    if (!success) {
      gpr_log(GPR_ERROR, "Server %zu had an error %s", i,
              s.error_message().c_str());
      GPR_ASSERT(false);
    }
  }
}

std::vector<grpc::testing::Server*>* g_inproc_servers = nullptr;

std::unique_ptr<ScenarioResult> RunScenario(
    const ClientConfig& initial_client_config, size_t num_clients,
    const ServerConfig& initial_server_config, size_t num_servers,
    int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count,
    const std::string& qps_server_target_override,
    const std::string& credential_type,
    const std::map<std::string, std::string>& per_worker_credential_types,
    bool run_inproc, int32_t median_latency_collection_interval_millis) {
  if (run_inproc) {
    g_inproc_servers = new std::vector<grpc::testing::Server*>;
  }
  // Log everything from the driver
  gpr_set_log_verbosity(GPR_LOG_SEVERITY_DEBUG);

  // ClientContext allocations (all are destroyed at scope exit)
  list<ClientContext> contexts;
  auto alloc_context = [](list<ClientContext>* contexts) {
    contexts->emplace_back();
    auto context = &contexts->back();
    context->set_wait_for_ready(true);
    return context;
  };

  // To be added to the result, containing the final configuration used for
  // the clients (including hosts, etc.)
  ClientConfig result_client_config;

  // Get client and server lists; ignored for an inproc test.
  auto workers = (!run_inproc) ? get_workers("QPS_WORKERS") : deque<string>();
  ClientConfig client_config = initial_client_config;

  // Spawn some local workers if desired
  vector<unique_ptr<QpsWorker>> local_workers;
  for (int i = 0; i < abs(spawn_local_worker_count); i++) {
    // act as if we're a new test -- gets a good rng seed
    static bool called_init = false;
    if (!called_init) {
      char args_buf[100];
      strcpy(args_buf, "some-benchmark");
      char* args[] = {args_buf};
      grpc_test_init(1, args);
      called_init = true;
    }

    char addr[256];
    // we use port # of -1 to indicate inproc
    int driver_port = (!run_inproc) ? grpc_pick_unused_port_or_die() : -1;
    local_workers.emplace_back(new QpsWorker(driver_port, 0, credential_type));
    sprintf(addr, "localhost:%d", driver_port);
    if (spawn_local_worker_count < 0) {
      workers.push_front(addr);
    } else {
      workers.push_back(addr);
    }
  }
  GPR_ASSERT(workers.size() != 0);

  // if num_clients is set to <=0, do dynamic sizing: all workers
  // except for servers are clients
  if (num_clients <= 0) {
    num_clients = workers.size() - num_servers;
  }

  // TODO(ctiller): support running multiple configurations, and binpack
  // client/server pairs to available workers
  GPR_ASSERT(workers.size() >= num_clients + num_servers);

  // Trim to just what we need
  workers.resize(num_clients + num_servers);

  // Start servers
  std::vector<ServerData> servers(num_servers);
  std::unordered_map<string, std::deque<int>> hosts_cores;
  ChannelArguments channel_args;

  for (size_t i = 0; i < num_servers; i++) {
    gpr_log(GPR_INFO, "Starting server on %s (worker #%" PRIuPTR ")",
            workers[i].c_str(), i);
    if (!run_inproc) {
      servers[i].stub = WorkerService::NewStub(grpc::CreateTestChannel(
          workers[i],
          GetCredType(workers[i], per_worker_credential_types, credential_type),
          nullptr /* call creds */, {} /* interceptor creators */));
    } else {
      servers[i].stub = WorkerService::NewStub(
          local_workers[i]->InProcessChannel(channel_args));
    }

    const ServerConfig& server_config = initial_server_config;
    if (server_config.core_limit() != 0) {
      gpr_log(GPR_ERROR,
              "server config core limit is set but ignored by driver");
      GPR_ASSERT(false);
    }

    ServerArgs args;
    *args.mutable_setup() = server_config;
    servers[i].stream = servers[i].stub->RunServer(alloc_context(&contexts));
    if (!servers[i].stream->Write(args)) {
      gpr_log(GPR_ERROR, "Could not write args to server %zu", i);
      GPR_ASSERT(false);
    }
    ServerStatus init_status;
    if (!servers[i].stream->Read(&init_status)) {
      gpr_log(GPR_ERROR, "Server %zu did not yield initial status", i);
      GPR_ASSERT(false);
    }
    if (qps_server_target_override.length() > 0) {
      // overriding the qps server target only works if there is 1 server
      GPR_ASSERT(num_servers == 1);
      client_config.add_server_targets(qps_server_target_override);
    } else if (run_inproc) {
      std::string cli_target(INPROC_NAME_PREFIX);
      cli_target += std::to_string(i);
      client_config.add_server_targets(cli_target);
    } else {
      std::string host = get_host(workers[i]);
      std::string cli_target =
          grpc_core::JoinHostPort(host.c_str(), init_status.port());
      client_config.add_server_targets(cli_target.c_str());
    }
  }

  client_config.set_median_latency_collection_interval_millis(
      median_latency_collection_interval_millis);

  // Targets are all set by now
  result_client_config = client_config;
  // Start clients
  std::vector<ClientData> clients(num_clients);
  size_t channels_allocated = 0;
  for (size_t i = 0; i < num_clients; i++) {
    const auto& worker = workers[i + num_servers];
    gpr_log(GPR_INFO, "Starting client on %s (worker #%" PRIuPTR ")",
            worker.c_str(), i + num_servers);
    if (!run_inproc) {
      clients[i].stub = WorkerService::NewStub(grpc::CreateTestChannel(
          worker,
          GetCredType(worker, per_worker_credential_types, credential_type),
          nullptr /* call creds */, {} /* interceptor creators */));
    } else {
      clients[i].stub = WorkerService::NewStub(
          local_workers[i + num_servers]->InProcessChannel(channel_args));
    }
    ClientConfig per_client_config = client_config;

    if (initial_client_config.core_limit() != 0) {
      gpr_log(GPR_ERROR, "client config core limit set but ignored");
      GPR_ASSERT(false);
    }

    // Divide the remaining channels evenly so that the total number of
    // channels requested is honored regardless of how many clients there are.
    size_t num_channels =
        (client_config.client_channels() - channels_allocated) /
        (num_clients - i);
    channels_allocated += num_channels;
    gpr_log(GPR_DEBUG, "Client %" PRIdPTR " gets %" PRIdPTR " channels", i,
            num_channels);
    per_client_config.set_client_channels(num_channels);
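    // Illustrative arithmetic: with client_channels() == 10 and
    // num_clients == 4, the successive divisions assign 2, 2, 3, and 3
    // channels, summing back to the requested 10.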

    ClientArgs args;
    *args.mutable_setup() = per_client_config;
    clients[i].stream = clients[i].stub->RunClient(alloc_context(&contexts));
    if (!clients[i].stream->Write(args)) {
      gpr_log(GPR_ERROR, "Could not write args to client %zu", i);
      GPR_ASSERT(false);
    }
  }

  for (size_t i = 0; i < num_clients; i++) {
    ClientStatus init_status;
    if (!clients[i].stream->Read(&init_status)) {
      gpr_log(GPR_ERROR, "Client %zu did not yield initial status", i);
      GPR_ASSERT(false);
    }
  }

  // Send an initial mark: clients can use this to know that everything is
  // ready to start
  gpr_log(GPR_INFO, "Initiating");
  ServerArgs server_mark;
  server_mark.mutable_mark()->set_reset(true);
  ClientArgs client_mark;
  client_mark.mutable_mark()->set_reset(true);
  ServerStatus server_status;
  ClientStatus client_status;
  for (size_t i = 0; i < num_clients; i++) {
    auto client = &clients[i];
    if (!client->stream->Write(client_mark)) {
      gpr_log(GPR_ERROR, "Couldn't write mark to client %zu", i);
      GPR_ASSERT(false);
    }
  }
  for (size_t i = 0; i < num_clients; i++) {
    auto client = &clients[i];
    if (!client->stream->Read(&client_status)) {
      gpr_log(GPR_ERROR, "Couldn't get status from client %zu", i);
      GPR_ASSERT(false);
    }
  }

  // Let everything warm up
  gpr_log(GPR_INFO, "Warming up");
  gpr_timespec start = gpr_now(GPR_CLOCK_REALTIME);
  gpr_sleep_until(
      gpr_time_add(start, gpr_time_from_seconds(warmup_seconds, GPR_TIMESPAN)));

  // Start a run
  gpr_log(GPR_INFO, "Starting");
  for (size_t i = 0; i < num_servers; i++) {
    auto server = &servers[i];
    if (!server->stream->Write(server_mark)) {
      gpr_log(GPR_ERROR, "Couldn't write mark to server %zu", i);
      GPR_ASSERT(false);
    }
  }
  for (size_t i = 0; i < num_clients; i++) {
    auto client = &clients[i];
    if (!client->stream->Write(client_mark)) {
      gpr_log(GPR_ERROR, "Couldn't write mark to client %zu", i);
      GPR_ASSERT(false);
    }
  }
  for (size_t i = 0; i < num_servers; i++) {
    auto server = &servers[i];
    if (!server->stream->Read(&server_status)) {
      gpr_log(GPR_ERROR, "Couldn't get status from server %zu", i);
      GPR_ASSERT(false);
    }
  }
  for (size_t i = 0; i < num_clients; i++) {
    auto client = &clients[i];
    if (!client->stream->Read(&client_status)) {
      gpr_log(GPR_ERROR, "Couldn't get status from client %zu", i);
      GPR_ASSERT(false);
    }
  }

  // Wait some time
  gpr_log(GPR_INFO, "Running");
  // Use gpr_sleep_until rather than this_thread::sleep_until to support
  // compilers that don't work with this_thread
  gpr_sleep_until(gpr_time_add(
      start,
      gpr_time_from_seconds(warmup_seconds + benchmark_seconds, GPR_TIMESPAN)));

  gpr_timer_set_enabled(0);

  // Finish a run
  std::unique_ptr<ScenarioResult> result(new ScenarioResult);
  Histogram merged_latencies;
  std::unordered_map<int, int64_t> merged_statuses;

  // In scenarios where clients lead the test, such as UNARY and
  // STREAMING_FROM_CLIENT, the clients must finish completely while a server
  // is still running so that they do not get stuck waiting for responses.
  bool client_finish_first =
      (client_config.rpc_type() != STREAMING_FROM_SERVER);

  FinishClients(clients, client_mark);

  if (!client_finish_first) {
    FinishServers(servers, server_mark);
  }

  ReceiveFinalStatusFromClients(clients, merged_latencies, merged_statuses,
                                *result);
  ShutdownClients(clients, *result);

  if (client_finish_first) {
    FinishServers(servers, server_mark);
  }

  ReceiveFinalStatusFromServer(servers, *result);
  ShutdownServers(servers, *result);

  if (g_inproc_servers != nullptr) {
    delete g_inproc_servers;
  }

  merged_latencies.FillProto(result->mutable_latencies());
  for (const auto& status_count : merged_statuses) {
    RequestResultCount* rrc = result->add_request_results();
    rrc->set_status_code(status_count.first);
    rrc->set_count(status_count.second);
  }
  postprocess_scenario_result(result.get());
  return result;
}
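// A minimal (hypothetical) invocation sketch, assuming the configs are built
// by the caller and QPS_WORKERS names one server and two client workers:
//   auto result = RunScenario(client_config, /*num_clients=*/2, server_config,
//                             /*num_servers=*/1, /*warmup_seconds=*/5,
//                             /*benchmark_seconds=*/30,
//                             /*spawn_local_worker_count=*/0,
//                             /*qps_server_target_override=*/"",
//                             kInsecureCredentialsType, {},
//                             /*run_inproc=*/false,
//                             /*median_latency_collection_interval_millis=*/0);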

bool RunQuit(
    const std::string& credential_type,
    const std::map<std::string, std::string>& per_worker_credential_types) {
  // Get client, server lists
  bool result = true;
  auto workers = get_workers("QPS_WORKERS");
  if (workers.size() == 0) {
    return false;
  }

  for (size_t i = 0; i < workers.size(); i++) {
    auto stub = WorkerService::NewStub(grpc::CreateTestChannel(
        workers[i],
        GetCredType(workers[i], per_worker_credential_types, credential_type),
        nullptr /* call creds */, {} /* interceptor creators */));
    Void dummy;
    grpc::ClientContext ctx;
    ctx.set_wait_for_ready(true);
    Status s = stub->QuitWorker(&ctx, dummy, &dummy);
    if (!s.ok()) {
      gpr_log(GPR_ERROR, "Worker %zu could not be properly quit because %s", i,
              s.error_message().c_str());
      result = false;
    }
  }
  return result;
}

}  // namespace testing
}  // namespace grpc