1 //
2 //
3 // Copyright 2016 gRPC authors.
4 //
5 // Licensed under the Apache License, Version 2.0 (the "License");
6 // you may not use this file except in compliance with the License.
7 // You may obtain a copy of the License at
8 //
9 // http://www.apache.org/licenses/LICENSE-2.0
10 //
11 // Unless required by applicable law or agreed to in writing, software
12 // distributed under the License is distributed on an "AS IS" BASIS,
13 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 // See the License for the specific language governing permissions and
15 // limitations under the License.
16 //
17 //
18
19 #include <grpc/byte_buffer.h>
20 #include <grpc/byte_buffer_reader.h>
21 #include <grpc/credentials.h>
22 #include <grpc/grpc.h>
23 #include <grpc/grpc_security.h>
24 #include <grpc/impl/channel_arg_names.h>
25 #include <grpc/impl/propagation_bits.h>
26 #include <grpc/slice.h>
27 #include <grpc/status.h>
28 #include <grpc/support/time.h>
29 #include <stdint.h>
30 #include <stdio.h>
31 #include <string.h>
32
33 #include <algorithm>
34 #include <string>
35 #include <utility>
36 #include <vector>
37
38 #include "absl/flags/flag.h"
39 #include "absl/flags/parse.h"
40 #include "absl/log/check.h"
41 #include "absl/log/log.h"
42 #include "absl/strings/match.h"
43 #include "src/core/ext/transport/chaotic_good/client/chaotic_good_connector.h"
44 #include "src/core/lib/channel/channel_args.h"
45 #include "src/core/lib/slice/slice_internal.h"
46 #include "src/core/util/useful.h"
47 #include "test/core/memory_usage/memstats.h"
48 #include "test/core/test_util/test_config.h"
49
// Client-wide state shared by all requests; the benchmark is driven from a
// single thread, so no synchronization is used.
static grpc_channel* channel;
static grpc_completion_queue* cq;
// Scratch op arrays reused for every batch; each request function memsets the
// array it needs before filling it in.
static grpc_op metadata_ops[2];
static grpc_op status_ops[2];
static grpc_op snapshot_ops[6];
// Cursor into whichever op array is currently being filled; the distance from
// the array start gives the op count passed to grpc_call_start_batch.
static grpc_op* op;

// Per-call state kept alive while a ping-pong call is outstanding so its
// memory cost can be measured.
typedef struct {
  grpc_call* call;
  grpc_metadata_array initial_metadata_recv;
  grpc_status_code status;  // filled by GRPC_OP_RECV_STATUS_ON_CLIENT
  grpc_slice details;       // status details; owned, unref'd when finished
  grpc_metadata_array trailing_metadata_recv;
} fling_call;

// Statically allocate call data structs. Enough to accommodate 100000 ping-pong
// calls and 1 extra for the snapshot calls.
static fling_call calls[100001];
68
tag(intptr_t t)69 static void* tag(intptr_t t) { return reinterpret_cast<void*>(t); }
70
71 // A call is intentionally divided into two steps. First step is to initiate a
72 // call (i.e send and recv metadata). A call is outstanding after we initiated,
73 // so we can measure the call memory usage.
// A call is intentionally divided into two steps. First step is to initiate a
// call (i.e send and recv metadata). A call is outstanding after we initiated,
// so we can measure the call memory usage.
static void init_ping_pong_request(int call_idx) {
  grpc_metadata_array_init(&calls[call_idx].initial_metadata_recv);

  // Two-op batch: send empty initial metadata + receive the server's initial
  // metadata. No message or close is sent, so the call stays open afterwards.
  memset(metadata_ops, 0, sizeof(metadata_ops));
  op = metadata_ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  // WAIT_FOR_READY: queue the RPC until the channel connects instead of
  // failing fast while the connection is still being established.
  op->flags = GRPC_INITIAL_METADATA_WAIT_FOR_READY;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata.recv_initial_metadata =
      &calls[call_idx].initial_metadata_recv;
  op++;

  grpc_slice hostname = grpc_slice_from_static_string("localhost");
  calls[call_idx].call = grpc_channel_create_call(
      channel, nullptr, GRPC_PROPAGATE_DEFAULTS, cq,
      grpc_slice_from_static_string("/Reflector/reflectUnary"), &hostname,
      gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);

  CHECK(GRPC_CALL_OK == grpc_call_start_batch(calls[call_idx].call,
                                              metadata_ops,
                                              (size_t)(op - metadata_ops),
                                              tag(call_idx), nullptr));
  // Block until this batch completes; single-threaded, so the next event on
  // the queue is presumably this batch's completion.
  grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
}
100
101 // Second step is to finish the call (i.e recv status) and destroy the call.
// Second step is to finish the call (i.e recv status) and destroy the call.
static void finish_ping_pong_request(int call_idx) {
  grpc_metadata_array_init(&calls[call_idx].trailing_metadata_recv);

  // Single-op batch: receive trailing metadata and status for the call that
  // init_ping_pong_request left outstanding.
  memset(status_ops, 0, sizeof(status_ops));
  op = status_ops;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata =
      &calls[call_idx].trailing_metadata_recv;
  op->data.recv_status_on_client.status = &calls[call_idx].status;
  op->data.recv_status_on_client.status_details = &calls[call_idx].details;
  op++;

  CHECK(GRPC_CALL_OK == grpc_call_start_batch(calls[call_idx].call, status_ops,
                                              (size_t)(op - status_ops),
                                              tag(call_idx), nullptr));
  // Block until the status arrives, then release everything the call owned.
  grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
  grpc_metadata_array_destroy(&calls[call_idx].initial_metadata_recv);
  grpc_metadata_array_destroy(&calls[call_idx].trailing_metadata_recv);
  grpc_slice_unref(calls[call_idx].details);
  grpc_call_unref(calls[call_idx].call);
  calls[call_idx].call = nullptr;
}
124
// Runs a complete unary call against the snapshot method named by `call_type`
// and returns the MemStats the server sent back in the response payload.
// Uses call slot `call_idx` (the callers pass 0, the extra slot reserved for
// snapshot calls).
static MemStats send_snapshot_request(int call_idx, grpc_slice call_type) {
  grpc_metadata_array_init(&calls[call_idx].initial_metadata_recv);
  grpc_metadata_array_init(&calls[call_idx].trailing_metadata_recv);

  // One batch carries the whole call: send metadata, half-close, then receive
  // metadata, message, and status.
  grpc_byte_buffer* response_payload_recv = nullptr;
  memset(snapshot_ops, 0, sizeof(snapshot_ops));
  op = snapshot_ops;

  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = GRPC_INITIAL_METADATA_WAIT_FOR_READY;
  op++;
  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata.recv_initial_metadata =
      &calls[call_idx].initial_metadata_recv;
  op++;
  op->op = GRPC_OP_RECV_MESSAGE;
  op->data.recv_message.recv_message = &response_payload_recv;
  op++;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata =
      &calls[call_idx].trailing_metadata_recv;
  op->data.recv_status_on_client.status = &calls[call_idx].status;
  op->data.recv_status_on_client.status_details = &calls[call_idx].details;
  op++;

  grpc_slice hostname = grpc_slice_from_static_string("localhost");
  calls[call_idx].call = grpc_channel_create_call(
      channel, nullptr, GRPC_PROPAGATE_DEFAULTS, cq, call_type, &hostname,
      gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
  // Tag is nullptr here (unlike the ping-pong batches): this batch's
  // completion is never matched against a call index.
  CHECK(GRPC_CALL_OK == grpc_call_start_batch(calls[call_idx].call,
                                              snapshot_ops,
                                              (size_t)(op - snapshot_ops),
                                              (void*)nullptr, nullptr));
  grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);

  LOG(INFO) << "Call " << call_idx << " status " << calls[call_idx].status
            << " (" << grpc_core::StringViewFromSlice(calls[call_idx].details)
            << ")";

  // The server is expected to have sent a message; a failed call (no payload)
  // aborts the benchmark here.
  CHECK_NE(response_payload_recv, nullptr);
  grpc_byte_buffer_reader reader;
  grpc_byte_buffer_reader_init(&reader, response_payload_recv);
  grpc_slice response = grpc_byte_buffer_reader_readall(&reader);
  // The payload is interpreted as the raw bytes of a MemStats struct --
  // assumes the server wrote a MemStats verbatim with the same layout/ABI as
  // this binary; TODO confirm against the server implementation.
  MemStats snapshot =
      *reinterpret_cast<MemStats*>(GRPC_SLICE_START_PTR(response));

  // Release everything the call owned; reset `details` to an empty slice so
  // the slot can be unconditionally unref'd again on the next use.
  grpc_metadata_array_destroy(&calls[call_idx].initial_metadata_recv);
  grpc_metadata_array_destroy(&calls[call_idx].trailing_metadata_recv);
  grpc_slice_unref(response);
  grpc_byte_buffer_reader_destroy(&reader);
  grpc_byte_buffer_destroy(response_payload_recv);
  grpc_slice_unref(calls[call_idx].details);
  calls[call_idx].details = grpc_empty_slice();
  grpc_call_unref(calls[call_idx].call);
  calls[call_idx].call = nullptr;

  return snapshot;
}
186
187 // Create iterations calls, return MemStats when all outstanding
run_test_loop(int iterations,int * call_idx)188 std::pair<MemStats, MemStats> run_test_loop(int iterations, int* call_idx) {
189 grpc_event event;
190
191 // benchmark period
192 for (int i = 0; i < iterations; ++i) {
193 init_ping_pong_request(*call_idx + i + 1);
194 }
195
196 auto peak = std::make_pair(
197 // client
198 MemStats::Snapshot(),
199 // server
200 send_snapshot_request(
201 0, grpc_slice_from_static_string("Reflector/DestroyCalls")));
202
203 do {
204 event = grpc_completion_queue_next(
205 cq,
206 gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
207 gpr_time_from_micros(10000, GPR_TIMESPAN)),
208 nullptr);
209 } while (event.type != GRPC_QUEUE_TIMEOUT);
210
211 // second step - recv status and destroy call
212 for (int i = 0; i < iterations; ++i) {
213 finish_ping_pong_request(*call_idx + i + 1);
214 }
215
216 do {
217 event = grpc_completion_queue_next(
218 cq,
219 gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
220 gpr_time_from_micros(10000, GPR_TIMESPAN)),
221 nullptr);
222 } while (event.type != GRPC_QUEUE_TIMEOUT);
223
224 *call_idx += iterations;
225
226 return peak;
227 }
228
// Command-line flags controlling the benchmark run.
ABSL_FLAG(std::string, target, "localhost:443", "Target host:port");
ABSL_FLAG(int, warmup, 100, "Warmup iterations");
ABSL_FLAG(int, benchmark, 1000, "Benchmark iterations");
ABSL_FLAG(bool, minstack, false, "Use minimal stack");
ABSL_FLAG(bool, chaotic_good, false, "Use chaotic good");
234
main(int argc,char ** argv)235 int main(int argc, char** argv) {
236 absl::ParseCommandLine(argc, argv);
237
238 grpc_slice slice = grpc_slice_from_copied_string("x");
239 char* fake_argv[1];
240
241 CHECK_GE(argc, 1);
242 fake_argv[0] = argv[0];
243 grpc::testing::TestEnvironment env(&argc, argv);
244
245 grpc_init();
246
247 for (size_t k = 0; k < GPR_ARRAY_SIZE(calls); k++) {
248 calls[k].details = grpc_empty_slice();
249 }
250
251 cq = grpc_completion_queue_create_for_next(nullptr);
252
253 std::vector<grpc_arg> args_vec;
254 if (absl::GetFlag(FLAGS_minstack)) {
255 args_vec.push_back(grpc_channel_arg_integer_create(
256 const_cast<char*>(GRPC_ARG_MINIMAL_STACK), 1));
257 }
258 if (absl::GetFlag(FLAGS_chaotic_good)) {
259 args_vec.push_back(grpc_channel_arg_integer_create(
260 const_cast<char*>(GRPC_ARG_ENABLE_RETRIES), 0));
261 }
262 grpc_channel_args args = {args_vec.size(), args_vec.data()};
263
264 if (absl::GetFlag(FLAGS_chaotic_good)) {
265 channel = grpc_chaotic_good_channel_create(
266 absl::GetFlag(FLAGS_target).c_str(), &args);
267 } else {
268 channel = grpc_channel_create(absl::GetFlag(FLAGS_target).c_str(),
269 grpc_insecure_credentials_create(), &args);
270 }
271
272 int call_idx = 0;
273 const int warmup_iterations = absl::GetFlag(FLAGS_warmup);
274 const int benchmark_iterations = absl::GetFlag(FLAGS_benchmark);
275
276 // warmup period
277 MemStats server_benchmark_calls_start = send_snapshot_request(
278 0, grpc_slice_from_static_string("Reflector/SimpleSnapshot"));
279 MemStats client_benchmark_calls_start = MemStats::Snapshot();
280
281 run_test_loop(warmup_iterations, &call_idx);
282
283 std::pair<MemStats, MemStats> peak =
284 run_test_loop(benchmark_iterations, &call_idx);
285
286 MemStats client_calls_inflight = peak.first;
287 MemStats server_calls_inflight = peak.second;
288
289 grpc_channel_destroy(channel);
290 grpc_completion_queue_shutdown(cq);
291
292 grpc_event event;
293 do {
294 event = grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
295 nullptr);
296 } while (event.type != GRPC_QUEUE_SHUTDOWN);
297 grpc_slice_unref(slice);
298
299 grpc_completion_queue_destroy(cq);
300 grpc_shutdown_blocking();
301
302 const char* prefix = "";
303 if (absl::StartsWith(absl::GetFlag(FLAGS_target), "xds:")) prefix = "xds ";
304 printf("---------client stats--------\n");
305 printf("%sclient call memory usage: %f bytes per call\n", prefix,
306 static_cast<double>(client_calls_inflight.rss -
307 client_benchmark_calls_start.rss) /
308 benchmark_iterations * 1024);
309
310 printf("---------server stats--------\n");
311 printf("%sserver call memory usage: %f bytes per call\n", prefix,
312 static_cast<double>(server_calls_inflight.rss -
313 server_benchmark_calls_start.rss) /
314 benchmark_iterations * 1024);
315
316 return 0;
317 }
318