/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
18
19 #include <grpc/support/port_platform.h>
20
21 #include "src/core/lib/iomgr/port.h"
22
23 #include <limits.h>
24 #include <string.h>
25
26 #include <grpc/slice_buffer.h>
27
28 #include <grpc/support/alloc.h>
29 #include <grpc/support/log.h>
30 #include <grpc/support/string_util.h>
31
32 #include "src/core/lib/iomgr/error.h"
33 #include "src/core/lib/iomgr/iomgr_custom.h"
34 #include "src/core/lib/iomgr/network_status_tracker.h"
35 #include "src/core/lib/iomgr/resource_quota.h"
36 #include "src/core/lib/iomgr/tcp_client.h"
37 #include "src/core/lib/iomgr/tcp_custom.h"
38 #include "src/core/lib/iomgr/tcp_server.h"
39 #include "src/core/lib/slice/slice_internal.h"
40 #include "src/core/lib/slice/slice_string_helpers.h"
41
42 #define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192
43
44 extern grpc_core::TraceFlag grpc_tcp_trace;
45
46 grpc_socket_vtable* grpc_custom_socket_vtable = nullptr;
47 extern grpc_tcp_server_vtable custom_tcp_server_vtable;
48 extern grpc_tcp_client_vtable custom_tcp_client_vtable;
49
grpc_custom_endpoint_init(grpc_socket_vtable * impl)50 void grpc_custom_endpoint_init(grpc_socket_vtable* impl) {
51 grpc_custom_socket_vtable = impl;
52 grpc_set_tcp_client_impl(&custom_tcp_client_vtable);
53 grpc_set_tcp_server_impl(&custom_tcp_server_vtable);
54 }
55
/* Endpoint state for a TCP connection backed by a user-provided custom
   socket implementation (e.g. libuv).  Refcounted: created with one ref in
   custom_tcp_endpoint_create; reads and writes each hold an extra ref while
   in flight (see TCP_REF/TCP_UNREF). */
typedef struct {
  grpc_endpoint base;  /* must be first: grpc_endpoint* casts to this type */
  gpr_refcount refcount;
  grpc_custom_socket* socket;  /* underlying custom socket; shares a refcount
                                  of its own (socket->refs) */

  grpc_closure* read_cb;   /* pending read completion, or nullptr */
  grpc_closure* write_cb;  /* pending write completion, or nullptr */

  grpc_slice_buffer* read_slices;   /* caller-owned destination for the
                                       in-flight read, or nullptr */
  grpc_slice_buffer* write_slices;  /* caller-owned source for the in-flight
                                       write */

  grpc_resource_user* resource_user;
  grpc_resource_user_slice_allocator slice_allocator;  /* allocates the single
                                                          read slice */

  bool shutting_down;  /* set once by endpoint_shutdown; writes fail after */

  char* peer_string;  /* owned copy; freed in tcp_free */
} custom_tcp_endpoint;
74
tcp_free(grpc_custom_socket * s)75 static void tcp_free(grpc_custom_socket* s) {
76 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)s->endpoint;
77 grpc_resource_user_unref(tcp->resource_user);
78 gpr_free(tcp->peer_string);
79 gpr_free(tcp);
80 s->refs--;
81 if (s->refs == 0) {
82 grpc_custom_socket_vtable->destroy(s);
83 gpr_free(s);
84 }
85 }
86
#ifndef NDEBUG
/* Debug builds: ref/unref helpers that log every refcount transition with
   the caller's file/line and a human-readable reason (only when the
   grpc_tcp_trace flag is enabled). */
#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(custom_tcp_endpoint* tcp, const char* reason,
                      const char* file, int line) {
  if (grpc_tcp_trace.enabled()) {
    /* Unsynchronized load is fine here: the value is for logging only. */
    gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
    gpr_log(file, line, GPR_LOG_SEVERITY_ERROR,
            "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp->socket, reason,
            val, val - 1);
  }
  /* gpr_unref returns true when the count hits zero: free everything. */
  if (gpr_unref(&tcp->refcount)) {
    tcp_free(tcp->socket);
  }
}

static void tcp_ref(custom_tcp_endpoint* tcp, const char* reason,
                    const char* file, int line) {
  if (grpc_tcp_trace.enabled()) {
    gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
    gpr_log(file, line, GPR_LOG_SEVERITY_ERROR,
            "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp->socket, reason,
            val, val + 1);
  }
  gpr_ref(&tcp->refcount);
}
#else
/* Release builds: plain refcounting with no tracing overhead. */
#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
#define TCP_REF(tcp, reason) tcp_ref((tcp))
static void tcp_unref(custom_tcp_endpoint* tcp) {
  if (gpr_unref(&tcp->refcount)) {
    tcp_free(tcp->socket);
  }
}

static void tcp_ref(custom_tcp_endpoint* tcp) { gpr_ref(&tcp->refcount); }
#endif
124
call_read_cb(custom_tcp_endpoint * tcp,grpc_error * error)125 static void call_read_cb(custom_tcp_endpoint* tcp, grpc_error* error) {
126 grpc_closure* cb = tcp->read_cb;
127 if (grpc_tcp_trace.enabled()) {
128 gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp->socket, cb, cb->cb,
129 cb->cb_arg);
130 size_t i;
131 const char* str = grpc_error_string(error);
132 gpr_log(GPR_INFO, "read: error=%s", str);
133
134 for (i = 0; i < tcp->read_slices->count; i++) {
135 char* dump = grpc_dump_slice(tcp->read_slices->slices[i],
136 GPR_DUMP_HEX | GPR_DUMP_ASCII);
137 gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
138 gpr_free(dump);
139 }
140 }
141 TCP_UNREF(tcp, "read");
142 tcp->read_slices = nullptr;
143 tcp->read_cb = nullptr;
144 GRPC_CLOSURE_SCHED(cb, error);
145 }
146
/* Completion callback for grpc_custom_socket_vtable->read.  |nread| is the
   number of bytes actually read into tcp->read_slices; a successful read of
   zero bytes is translated into an EOF error.  Consumes |error|. */
static void custom_read_callback(grpc_custom_socket* socket, size_t nread,
                                 grpc_error* error) {
  grpc_core::ExecCtx exec_ctx;
  grpc_slice_buffer garbage;
  custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)socket->endpoint;
  if (error == GRPC_ERROR_NONE && nread == 0) {
    error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF");
  }
  if (error == GRPC_ERROR_NONE) {
    // Successful read
    if ((size_t)nread < tcp->read_slices->length) {
      /* TODO(murgatroid99): Instead of discarding the unused part of the read
       * buffer, reuse it as the next read buffer. */
      /* Trim the unread tail of the buffer into |garbage| and release it so
         the caller only sees the bytes actually read. */
      grpc_slice_buffer_init(&garbage);
      grpc_slice_buffer_trim_end(
          tcp->read_slices, tcp->read_slices->length - (size_t)nread, &garbage);
      grpc_slice_buffer_reset_and_unref_internal(&garbage);
    }
  } else {
    /* Failed read: drop whatever was staged in the read buffer. */
    grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
  }
  call_read_cb(tcp, error);
}
170
/* Callback fired when the resource user has allocated (or failed to
   allocate) the single read slice requested by endpoint_read.  On success,
   starts the actual socket read; on failure, reports the error to the
   pending read callback.  Does not take ownership of |error|. */
static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
  custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)tcpp;
  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp->socket,
            grpc_error_string(error));
  }
  if (error == GRPC_ERROR_NONE) {
    /* Before calling read, we allocate a buffer with exactly one slice
     * to tcp->read_slices and wait for the callback indicating that the
     * allocation was successful. So slices[0] should always exist here */
    char* buffer = (char*)GRPC_SLICE_START_PTR(tcp->read_slices->slices[0]);
    size_t len = GRPC_SLICE_LENGTH(tcp->read_slices->slices[0]);
    grpc_custom_socket_vtable->read(tcp->socket, buffer, len,
                                    custom_read_callback);
  } else {
    /* Allocation failed: surface the error to the pending read callback. */
    grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
    call_read_cb(tcp, GRPC_ERROR_REF(error));
  }
  if (grpc_tcp_trace.enabled()) {
    /* NOTE(review): this logs "Initiating read" even on the error path above,
       where no read was initiated and call_read_cb may already have dropped
       the "read" ref to |tcp| — confirm this trace line is intended. */
    const char* str = grpc_error_string(error);
    gpr_log(GPR_INFO, "Initiating read on %p: error=%s", tcp->socket, str);
  }
}
194
endpoint_read(grpc_endpoint * ep,grpc_slice_buffer * read_slices,grpc_closure * cb)195 static void endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
196 grpc_closure* cb) {
197 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
198 GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
199 GPR_ASSERT(tcp->read_cb == nullptr);
200 tcp->read_cb = cb;
201 tcp->read_slices = read_slices;
202 grpc_slice_buffer_reset_and_unref_internal(read_slices);
203 TCP_REF(tcp, "read");
204 grpc_resource_user_alloc_slices(&tcp->slice_allocator,
205 GRPC_TCP_DEFAULT_READ_SLICE_SIZE, 1,
206 tcp->read_slices);
207 }
208
custom_write_callback(grpc_custom_socket * socket,grpc_error * error)209 static void custom_write_callback(grpc_custom_socket* socket,
210 grpc_error* error) {
211 grpc_core::ExecCtx exec_ctx;
212 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)socket->endpoint;
213 grpc_closure* cb = tcp->write_cb;
214 tcp->write_cb = nullptr;
215 if (grpc_tcp_trace.enabled()) {
216 const char* str = grpc_error_string(error);
217 gpr_log(GPR_INFO, "write complete on %p: error=%s", tcp->socket, str);
218 }
219 TCP_UNREF(tcp, "write");
220 GRPC_CLOSURE_SCHED(cb, error);
221 }
222
endpoint_write(grpc_endpoint * ep,grpc_slice_buffer * write_slices,grpc_closure * cb,void * arg)223 static void endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* write_slices,
224 grpc_closure* cb, void* arg) {
225 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
226 GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
227
228 if (grpc_tcp_trace.enabled()) {
229 size_t j;
230
231 for (j = 0; j < write_slices->count; j++) {
232 char* data = grpc_dump_slice(write_slices->slices[j],
233 GPR_DUMP_HEX | GPR_DUMP_ASCII);
234 gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp->socket, tcp->peer_string,
235 data);
236 gpr_free(data);
237 }
238 }
239
240 if (tcp->shutting_down) {
241 GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
242 "TCP socket is shutting down"));
243 return;
244 }
245
246 GPR_ASSERT(tcp->write_cb == nullptr);
247 tcp->write_slices = write_slices;
248 GPR_ASSERT(tcp->write_slices->count <= UINT_MAX);
249 if (tcp->write_slices->count == 0) {
250 // No slices means we don't have to do anything,
251 // and libuv doesn't like empty writes
252 GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_NONE);
253 return;
254 }
255 tcp->write_cb = cb;
256 TCP_REF(tcp, "write");
257 grpc_custom_socket_vtable->write(tcp->socket, tcp->write_slices,
258 custom_write_callback);
259 }
260
endpoint_add_to_pollset(grpc_endpoint * ep,grpc_pollset * pollset)261 static void endpoint_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
262 // No-op. We're ignoring pollsets currently
263 (void)ep;
264 (void)pollset;
265 }
266
endpoint_add_to_pollset_set(grpc_endpoint * ep,grpc_pollset_set * pollset)267 static void endpoint_add_to_pollset_set(grpc_endpoint* ep,
268 grpc_pollset_set* pollset) {
269 // No-op. We're ignoring pollsets currently
270 (void)ep;
271 (void)pollset;
272 }
273
endpoint_delete_from_pollset_set(grpc_endpoint * ep,grpc_pollset_set * pollset)274 static void endpoint_delete_from_pollset_set(grpc_endpoint* ep,
275 grpc_pollset_set* pollset) {
276 // No-op. We're ignoring pollsets currently
277 (void)ep;
278 (void)pollset;
279 }
280
/* grpc_endpoint_vtable.shutdown: idempotent.  The first call marks the
   endpoint as shutting down and shuts down the resource user and the
   underlying custom socket; later calls do nothing.  Consumes |why|. */
static void endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) {
  custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
  if (!tcp->shutting_down) {
    if (grpc_tcp_trace.enabled()) {
      const char* str = grpc_error_string(why);
      gpr_log(GPR_INFO, "TCP %p shutdown why=%s", tcp->socket, str);
    }
    tcp->shutting_down = true;
    /* Pending read/write closures are NOT failed here; they are completed
       by the socket implementation's own read/write callbacks after the
       vtable shutdown below. */
    // GRPC_CLOSURE_SCHED(tcp->read_cb, GRPC_ERROR_REF(why));
    // GRPC_CLOSURE_SCHED(tcp->write_cb, GRPC_ERROR_REF(why));
    // tcp->read_cb = nullptr;
    // tcp->write_cb = nullptr;
    grpc_resource_user_shutdown(tcp->resource_user);
    grpc_custom_socket_vtable->shutdown(tcp->socket);
  }
  GRPC_ERROR_UNREF(why);
}
298
custom_close_callback(grpc_custom_socket * socket)299 static void custom_close_callback(grpc_custom_socket* socket) {
300 socket->refs--;
301 if (socket->refs == 0) {
302 grpc_custom_socket_vtable->destroy(socket);
303 gpr_free(socket);
304 } else if (socket->endpoint) {
305 grpc_core::ExecCtx exec_ctx;
306 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)socket->endpoint;
307 TCP_UNREF(tcp, "destroy");
308 }
309 }
310
endpoint_destroy(grpc_endpoint * ep)311 static void endpoint_destroy(grpc_endpoint* ep) {
312 grpc_network_status_unregister_endpoint(ep);
313 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
314 grpc_custom_socket_vtable->close(tcp->socket, custom_close_callback);
315 }
316
endpoint_get_peer(grpc_endpoint * ep)317 static char* endpoint_get_peer(grpc_endpoint* ep) {
318 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
319 return gpr_strdup(tcp->peer_string);
320 }
321
endpoint_get_resource_user(grpc_endpoint * ep)322 static grpc_resource_user* endpoint_get_resource_user(grpc_endpoint* ep) {
323 custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
324 return tcp->resource_user;
325 }
326
endpoint_get_fd(grpc_endpoint * ep)327 static int endpoint_get_fd(grpc_endpoint* ep) { return -1; }
328
/* grpc_endpoint vtable wiring the static functions above into the iomgr. */
static grpc_endpoint_vtable vtable = {endpoint_read,
                                      endpoint_write,
                                      endpoint_add_to_pollset,
                                      endpoint_add_to_pollset_set,
                                      endpoint_delete_from_pollset_set,
                                      endpoint_shutdown,
                                      endpoint_destroy,
                                      endpoint_get_resource_user,
                                      endpoint_get_peer,
                                      endpoint_get_fd};
339
custom_tcp_endpoint_create(grpc_custom_socket * socket,grpc_resource_quota * resource_quota,char * peer_string)340 grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket,
341 grpc_resource_quota* resource_quota,
342 char* peer_string) {
343 custom_tcp_endpoint* tcp =
344 (custom_tcp_endpoint*)gpr_malloc(sizeof(custom_tcp_endpoint));
345 grpc_core::ExecCtx exec_ctx;
346
347 if (grpc_tcp_trace.enabled()) {
348 gpr_log(GPR_INFO, "Creating TCP endpoint %p", socket);
349 }
350 memset(tcp, 0, sizeof(custom_tcp_endpoint));
351 socket->refs++;
352 socket->endpoint = (grpc_endpoint*)tcp;
353 tcp->socket = socket;
354 tcp->base.vtable = &vtable;
355 gpr_ref_init(&tcp->refcount, 1);
356 tcp->peer_string = gpr_strdup(peer_string);
357 tcp->shutting_down = false;
358 tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
359 grpc_resource_user_slice_allocator_init(
360 &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp);
361 /* Tell network status tracking code about the new endpoint */
362 grpc_network_status_register_endpoint(&tcp->base);
363
364 return &tcp->base;
365 }
366