1 //
2 //
3 // Copyright 2016 gRPC authors.
4 //
5 // Licensed under the Apache License, Version 2.0 (the "License");
6 // you may not use this file except in compliance with the License.
7 // You may obtain a copy of the License at
8 //
9 // http://www.apache.org/licenses/LICENSE-2.0
10 //
11 // Unless required by applicable law or agreed to in writing, software
12 // distributed under the License is distributed on an "AS IS" BASIS,
13 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 // See the License for the specific language governing permissions and
15 // limitations under the License.
16 //
17 //
18
19 #include <signal.h>
20 #include <stdint.h>
21 #include <stdlib.h>
22 #include <time.h>
23
24 #ifndef _WIN32
25 // This is for _exit() below, which is temporary.
26 #include <unistd.h>
27 #endif
28
29 #include <grpc/byte_buffer.h>
30 #include <grpc/credentials.h>
31 #include <grpc/grpc.h>
32 #include <grpc/grpc_security.h>
33 #include <grpc/impl/channel_arg_names.h>
34 #include <grpc/slice.h>
35 #include <grpc/status.h>
36 #include <grpc/support/alloc.h>
37 #include <grpc/support/time.h>
38
39 #include <algorithm>
40 #include <string>
41 #include <vector>
42
43 #include "absl/base/attributes.h"
44 #include "absl/flags/flag.h"
45 #include "absl/flags/parse.h"
46 #include "absl/log/check.h"
47 #include "absl/log/log.h"
48 #include "absl/status/status.h"
49 #include "src/core/ext/transport/chaotic_good/server/chaotic_good_server.h"
50 #include "src/core/lib/channel/channel_args.h"
51 #include "src/core/util/host_port.h"
52 #include "src/core/xds/grpc/xds_enabled_server.h"
53 #include "test/core/end2end/data/ssl_test_data.h"
54 #include "test/core/memory_usage/memstats.h"
55 #include "test/core/test_util/port.h"
56 #include "test/core/test_util/test_config.h"
57
58 ABSL_FLAG(std::string, bind, "", "Bind host:port");
59 ABSL_FLAG(bool, secure, false, "Use security");
60 ABSL_FLAG(bool, minstack, false, "Use minimal stack");
61 ABSL_FLAG(bool, use_xds, false, "Use xDS");
62 ABSL_FLAG(bool, chaotic_good, false, "Use chaotic good");
63
64 static grpc_completion_queue* cq;
65 static grpc_server* server;
66 static grpc_op metadata_ops[2];
67 static grpc_op snapshot_ops[5];
68 static grpc_op status_op;
69 static int got_sigint = 0;
70 static grpc_byte_buffer* payload_buffer = nullptr;
71 static int was_cancelled = 2;
72
tag(intptr_t t)73 static void* tag(intptr_t t) { return reinterpret_cast<void*>(t); }
74
75 typedef enum {
76 FLING_SERVER_NEW_REQUEST = 1,
77 FLING_SERVER_SEND_INIT_METADATA,
78 FLING_SERVER_WAIT_FOR_DESTROY,
79 FLING_SERVER_SEND_STATUS_FLING_CALL,
80 FLING_SERVER_SEND_STATUS_SNAPSHOT,
81 FLING_SERVER_BATCH_SEND_STATUS_FLING_CALL
82 } fling_server_tags;
83
84 typedef struct {
85 fling_server_tags state;
86 grpc_call* call;
87 grpc_call_details call_details;
88 grpc_metadata_array request_metadata_recv;
89 grpc_metadata_array initial_metadata_send;
90 } fling_call;
91
92 // hold up to 100000 calls and 6 snaphost calls
93 static fling_call calls[1000006];
94
request_call_unary(int call_idx)95 static void request_call_unary(int call_idx) {
96 if (call_idx == static_cast<int>(sizeof(calls) / sizeof(fling_call))) {
97 LOG(INFO) << "Used all call slots (10000) on server. Server exit.";
98 _exit(0);
99 }
100 grpc_metadata_array_init(&calls[call_idx].request_metadata_recv);
101 grpc_server_request_call(
102 server, &calls[call_idx].call, &calls[call_idx].call_details,
103 &calls[call_idx].request_metadata_recv, cq, cq, &calls[call_idx]);
104 }
105
send_initial_metadata_unary(void * tag)106 static void send_initial_metadata_unary(void* tag) {
107 grpc_metadata_array_init(
108 &(*static_cast<fling_call*>(tag)).initial_metadata_send);
109 metadata_ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
110 metadata_ops[0].data.send_initial_metadata.count = 0;
111
112 CHECK(GRPC_CALL_OK == grpc_call_start_batch((*(fling_call*)tag).call,
113 metadata_ops, 1, tag, nullptr));
114 }
115
send_status(void * tag)116 static void send_status(void* tag) {
117 status_op.op = GRPC_OP_SEND_STATUS_FROM_SERVER;
118 status_op.data.send_status_from_server.status = GRPC_STATUS_OK;
119 status_op.data.send_status_from_server.trailing_metadata_count = 0;
120 grpc_slice details = grpc_slice_from_static_string("");
121 status_op.data.send_status_from_server.status_details = &details;
122
123 CHECK(GRPC_CALL_OK == grpc_call_start_batch((*(fling_call*)tag).call,
124 &status_op, 1, tag, nullptr));
125 }
126
send_snapshot(void * tag,MemStats * snapshot)127 static void send_snapshot(void* tag, MemStats* snapshot) {
128 grpc_op* op;
129
130 grpc_slice snapshot_slice =
131 grpc_slice_new(snapshot, sizeof(*snapshot), gpr_free);
132 payload_buffer = grpc_raw_byte_buffer_create(&snapshot_slice, 1);
133 grpc_metadata_array_init(
134 &(*static_cast<fling_call*>(tag)).initial_metadata_send);
135
136 op = snapshot_ops;
137 op->op = GRPC_OP_SEND_INITIAL_METADATA;
138 op->data.send_initial_metadata.count = 0;
139 op++;
140 op->op = GRPC_OP_SEND_MESSAGE;
141 if (payload_buffer == nullptr) {
142 LOG(INFO) << "NULL payload buffer !!!";
143 }
144 op->data.send_message.send_message = payload_buffer;
145 op++;
146 op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
147 op->data.send_status_from_server.status = GRPC_STATUS_OK;
148 op->data.send_status_from_server.trailing_metadata_count = 0;
149 grpc_slice details = grpc_slice_from_static_string("");
150 op->data.send_status_from_server.status_details = &details;
151 op++;
152 op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
153 op->data.recv_close_on_server.cancelled = &was_cancelled;
154 op++;
155
156 CHECK(GRPC_CALL_OK ==
157 grpc_call_start_batch((*(fling_call*)tag).call, snapshot_ops,
158 (size_t)(op - snapshot_ops), tag, nullptr));
159 }
160 // We have some sort of deadlock, so let's not exit gracefully for now.
161 // When that is resolved, please remove the #include <unistd.h> above.
sigint_handler(int)162 static void sigint_handler(int /*x*/) { _exit(0); }
163
OnServingStatusUpdate(void *,const char * uri,grpc_serving_status_update update)164 static void OnServingStatusUpdate(void* /*user_data*/, const char* uri,
165 grpc_serving_status_update update) {
166 absl::Status status(static_cast<absl::StatusCode>(update.code),
167 update.error_message);
168 LOG(INFO) << "xDS serving status notification: uri=\"" << uri
169 << "\", status=" << status;
170 }
171
main(int argc,char ** argv)172 int main(int argc, char** argv) {
173 absl::ParseCommandLine(argc, argv);
174
175 grpc_event ev;
176 grpc_completion_queue* shutdown_cq;
177 int shutdown_started = 0;
178 int shutdown_finished = 0;
179
180 char* fake_argv[1];
181
182 CHECK_GE(argc, 1);
183 fake_argv[0] = argv[0];
184 grpc::testing::TestEnvironment env(&argc, argv);
185
186 grpc_init();
187 srand(static_cast<unsigned>(clock()));
188
189 std::string addr = absl::GetFlag(FLAGS_bind);
190 if (addr.empty()) {
191 addr = grpc_core::JoinHostPort("::", grpc_pick_unused_port_or_die());
192 }
193 LOG(INFO) << "creating server on: " << addr;
194
195 cq = grpc_completion_queue_create_for_next(nullptr);
196
197 std::vector<grpc_arg> args_vec;
198 if (absl::GetFlag(FLAGS_minstack)) {
199 args_vec.push_back(grpc_channel_arg_integer_create(
200 const_cast<char*>(GRPC_ARG_MINIMAL_STACK), 1));
201 }
202 // TODO(roth): The xDS code here duplicates the functionality in
203 // XdsServerBuilder, which is undesirable. We should ideally convert
204 // this to use the C++ API instead of the C-core API, so that we can
205 // avoid this duplication.
206 if (absl::GetFlag(FLAGS_use_xds)) {
207 args_vec.push_back(grpc_channel_arg_integer_create(
208 const_cast<char*>(GRPC_ARG_XDS_ENABLED_SERVER), 1));
209 }
210
211 grpc_channel_args args = {args_vec.size(), args_vec.data()};
212 server = grpc_server_create(&args, nullptr);
213
214 if (absl::GetFlag(FLAGS_use_xds)) {
215 grpc_server_config_fetcher* config_fetcher =
216 grpc_server_config_fetcher_xds_create({OnServingStatusUpdate, nullptr},
217 &args);
218 if (config_fetcher != nullptr) {
219 grpc_server_set_config_fetcher(server, config_fetcher);
220 }
221 }
222
223 MemStats before_server_create = MemStats::Snapshot();
224 if (absl::GetFlag(FLAGS_chaotic_good)) {
225 grpc_server_add_chaotic_good_port(server, addr.c_str());
226 } else if (absl::GetFlag(FLAGS_secure)) {
227 grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {test_server1_key,
228 test_server1_cert};
229 grpc_server_credentials* ssl_creds = grpc_ssl_server_credentials_create(
230 nullptr, &pem_key_cert_pair, 1, 0, nullptr);
231 CHECK(grpc_server_add_http2_port(server, addr.c_str(), ssl_creds));
232 grpc_server_credentials_release(ssl_creds);
233 } else {
234 CHECK(grpc_server_add_http2_port(
235 server, addr.c_str(), grpc_insecure_server_credentials_create()));
236 }
237
238 grpc_server_register_completion_queue(server, cq, nullptr);
239 grpc_server_start(server);
240
241 MemStats after_server_create = MemStats::Snapshot();
242
243 // initialize call instances
244 for (int i = 0; i < static_cast<int>(sizeof(calls) / sizeof(fling_call));
245 i++) {
246 grpc_call_details_init(&calls[i].call_details);
247 calls[i].state = FLING_SERVER_NEW_REQUEST;
248 }
249
250 int next_call_idx = 0;
251 MemStats current_snapshot;
252
253 request_call_unary(next_call_idx);
254
255 signal(SIGINT, sigint_handler);
256
257 while (!shutdown_finished) {
258 if (got_sigint && !shutdown_started) {
259 LOG(INFO) << "Shutting down due to SIGINT";
260
261 shutdown_cq = grpc_completion_queue_create_for_pluck(nullptr);
262 grpc_server_shutdown_and_notify(server, shutdown_cq, tag(1000));
263 CHECK(grpc_completion_queue_pluck(shutdown_cq, tag(1000),
264 grpc_timeout_seconds_to_deadline(5),
265 nullptr)
266 .type == GRPC_OP_COMPLETE);
267 grpc_completion_queue_destroy(shutdown_cq);
268 grpc_completion_queue_shutdown(cq);
269 shutdown_started = 1;
270 }
271 ev = grpc_completion_queue_next(
272 cq,
273 gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
274 gpr_time_from_micros(1000000, GPR_TIMESPAN)),
275 nullptr);
276 fling_call* s = static_cast<fling_call*>(ev.tag);
277 switch (ev.type) {
278 case GRPC_OP_COMPLETE:
279 switch (s->state) {
280 case FLING_SERVER_NEW_REQUEST:
281 request_call_unary(++next_call_idx);
282 if (0 == grpc_slice_str_cmp(s->call_details.method,
283 "/Reflector/reflectUnary")) {
284 s->state = FLING_SERVER_SEND_INIT_METADATA;
285 send_initial_metadata_unary(s);
286 } else if (0 ==
287 grpc_slice_str_cmp(s->call_details.method,
288 "Reflector/GetBeforeSvrCreation")) {
289 s->state = FLING_SERVER_SEND_STATUS_SNAPSHOT;
290 send_snapshot(s, &before_server_create);
291 } else if (0 ==
292 grpc_slice_str_cmp(s->call_details.method,
293 "Reflector/GetAfterSvrCreation")) {
294 s->state = FLING_SERVER_SEND_STATUS_SNAPSHOT;
295 send_snapshot(s, &after_server_create);
296 } else if (0 == grpc_slice_str_cmp(s->call_details.method,
297 "Reflector/SimpleSnapshot")) {
298 s->state = FLING_SERVER_SEND_STATUS_SNAPSHOT;
299 current_snapshot = MemStats::Snapshot();
300 send_snapshot(s, ¤t_snapshot);
301 } else if (0 == grpc_slice_str_cmp(s->call_details.method,
302 "Reflector/DestroyCalls")) {
303 s->state = FLING_SERVER_BATCH_SEND_STATUS_FLING_CALL;
304 current_snapshot = MemStats::Snapshot();
305 send_snapshot(s, ¤t_snapshot);
306 } else {
307 LOG(ERROR) << "Wrong call method";
308 }
309 break;
310 case FLING_SERVER_SEND_INIT_METADATA:
311 s->state = FLING_SERVER_WAIT_FOR_DESTROY;
312 break;
313 case FLING_SERVER_WAIT_FOR_DESTROY:
314 break;
315 case FLING_SERVER_SEND_STATUS_FLING_CALL:
316 grpc_call_unref(s->call);
317 grpc_call_details_destroy(&s->call_details);
318 grpc_metadata_array_destroy(&s->initial_metadata_send);
319 grpc_metadata_array_destroy(&s->request_metadata_recv);
320 break;
321 case FLING_SERVER_BATCH_SEND_STATUS_FLING_CALL:
322 for (int k = 0;
323 k < static_cast<int>(sizeof(calls) / sizeof(fling_call));
324 ++k) {
325 if (calls[k].state == FLING_SERVER_WAIT_FOR_DESTROY) {
326 calls[k].state = FLING_SERVER_SEND_STATUS_FLING_CALL;
327 send_status(&calls[k]);
328 }
329 }
330 ABSL_FALLTHROUGH_INTENDED;
331 // no break here since we want to continue to case
332 // FLING_SERVER_SEND_STATUS_SNAPSHOT to destroy the snapshot call
333 case FLING_SERVER_SEND_STATUS_SNAPSHOT:
334 grpc_byte_buffer_destroy(payload_buffer);
335 grpc_call_unref(s->call);
336 grpc_call_details_destroy(&s->call_details);
337 grpc_metadata_array_destroy(&s->initial_metadata_send);
338 grpc_metadata_array_destroy(&s->request_metadata_recv);
339 payload_buffer = nullptr;
340 break;
341 }
342 break;
343 case GRPC_QUEUE_SHUTDOWN:
344 CHECK(shutdown_started);
345 shutdown_finished = 1;
346 break;
347 case GRPC_QUEUE_TIMEOUT:
348 break;
349 }
350 }
351
352 grpc_server_destroy(server);
353 grpc_completion_queue_destroy(cq);
354 grpc_shutdown_blocking();
355 return 0;
356 }
357