//
//
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//

#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/port.h"

#ifdef GRPC_WINSOCK_SOCKET

#include <limits.h>

#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/log_windows.h>
#include <grpc/support/string_util.h>

#include "src/core/lib/address_utils/sockaddr_utils.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/iomgr/iocp_windows.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_windows.h"
#include "src/core/lib/iomgr/socket_windows.h"
#include "src/core/lib/iomgr/tcp_client.h"
#include "src/core/lib/iomgr/tcp_windows.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"

#if defined(__MSYS__) && defined(GPR_ARCH_64)
// Nasty workaround for a nasty bug when using the 64-bit MSYS compiler
// in conjunction with Microsoft Windows headers.
#define GRPC_FIONBIO _IOW('f', 126, uint32_t)
#else
#define GRPC_FIONBIO FIONBIO
#endif

extern grpc_core::TraceFlag grpc_tcp_trace;

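// Switches the socket into non-blocking mode via WSAIoctl(FIONBIO).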
grpc_error_handle grpc_tcp_set_non_block(SOCKET sock) {
  int status;
  uint32_t param = 1;
  DWORD ret;
  status = WSAIoctl(sock, GRPC_FIONBIO, &param, sizeof(param), NULL, 0, &ret,
                    NULL, NULL);
  return status == 0
             ? absl::OkStatus()
             : GRPC_WSA_ERROR(WSAGetLastError(), "WSAIoctl(GRPC_FIONBIO)");
}

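// Clears IPV6_V6ONLY so an AF_INET6 socket can carry IPv4 traffic as well
// (dual-stack), via IPv4-mapped addresses.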
static grpc_error_handle set_dualstack(SOCKET sock) {
  int status;
  unsigned long param = 0;
  status = setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (const char*)&param,
                      sizeof(param));
  return status == 0
             ? absl::OkStatus()
             : GRPC_WSA_ERROR(WSAGetLastError(), "setsockopt(IPV6_V6ONLY)");
}

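// Sets TCP_NODELAY, disabling Nagle's algorithm so small writes are sent
// without delay.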
static grpc_error_handle enable_socket_low_latency(SOCKET sock) {
  int status;
  BOOL param = TRUE;
  status = ::setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
                        reinterpret_cast<char*>(&param), sizeof(param));
  if (status == SOCKET_ERROR) {
    status = WSAGetLastError();
  }
  return status == 0 ? absl::OkStatus()
                     : GRPC_WSA_ERROR(status, "setsockopt(TCP_NODELAY)");
}

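// Applies the standard set of socket options (non-blocking, dual-stack,
// TCP_NODELAY) to a freshly created socket, stopping at the first failure.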
grpc_error_handle grpc_tcp_prepare_socket(SOCKET sock) {
  grpc_error_handle err;
  err = grpc_tcp_set_non_block(sock);
  if (!err.ok()) return err;
  err = set_dualstack(sock);
  if (!err.ok()) return err;
  err = enable_socket_low_latency(sock);
  if (!err.ok()) return err;
  return absl::OkStatus();
}

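// State for a single Windows TCP endpoint.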
typedef struct grpc_tcp {
  // This is our C++ class derivation emulation.
  grpc_endpoint base;
  // The one socket this endpoint is using.
  grpc_winsocket* socket;
  // Refcounting how many operations are in progress.
  gpr_refcount refcount;

  grpc_closure on_read;
  grpc_closure on_write;

  grpc_closure* read_cb;
  grpc_closure* write_cb;

  // garbage after the last read
  grpc_slice_buffer last_read_buffer;

  grpc_slice_buffer* write_slices;
  grpc_slice_buffer* read_slices;

  // The IO Completion Port runs from another thread. We need some mechanism
  // to protect ourselves when requesting a shutdown.
  gpr_mu mu;
  int shutting_down;
  grpc_error_handle shutdown_error;

  std::string peer_string;
  std::string local_address;
} grpc_tcp;

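// Called once the last reference is dropped: destroys the winsocket and frees
// all per-endpoint state.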
static void tcp_free(grpc_tcp* tcp) {
  grpc_winsocket_destroy(tcp->socket);
  gpr_mu_destroy(&tcp->mu);
  grpc_slice_buffer_destroy(&tcp->last_read_buffer);
  delete tcp;
}

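// In debug builds, TCP_REF/TCP_UNREF log every refcount change (with the call
// site) when the tcp trace flag is enabled; release builds use the bare
// refcount operations.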
#ifndef NDEBUG
#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
static void tcp_unref(grpc_tcp* tcp, const char* reason, const char* file,
                      int line) {
  if (grpc_tcp_trace.enabled()) {
    gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
            "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
            val - 1);
  }
  if (gpr_unref(&tcp->refcount)) {
    tcp_free(tcp);
  }
}

static void tcp_ref(grpc_tcp* tcp, const char* reason, const char* file,
                    int line) {
  if (grpc_tcp_trace.enabled()) {
    gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
            "TCP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
            val + 1);
  }
  gpr_ref(&tcp->refcount);
}
#else
#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
#define TCP_REF(tcp, reason) tcp_ref((tcp))
static void tcp_unref(grpc_tcp* tcp) {
  if (gpr_unref(&tcp->refcount)) {
    tcp_free(tcp);
  }
}

static void tcp_ref(grpc_tcp* tcp) { gpr_ref(&tcp->refcount); }
#endif

// Asynchronous callback from the IOCP, or the background thread.
static void on_read(void* tcpp, grpc_error_handle error) {
  grpc_tcp* tcp = (grpc_tcp*)tcpp;
  grpc_closure* cb = tcp->read_cb;
  grpc_winsocket* socket = tcp->socket;
  grpc_winsocket_callback_info* info = &socket->read_info;

  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "TCP:%p on_read", tcp);
  }

  if (error.ok()) {
    if (info->wsa_error != 0 && !tcp->shutting_down) {
      error = GRPC_WSA_ERROR(info->wsa_error, "IOCP/Socket");
      grpc_slice_buffer_reset_and_unref(tcp->read_slices);
    } else {
      if (info->bytes_transferred != 0 && !tcp->shutting_down) {
        GPR_ASSERT((size_t)info->bytes_transferred <= tcp->read_slices->length);
        if (static_cast<size_t>(info->bytes_transferred) !=
            tcp->read_slices->length) {
          grpc_slice_buffer_trim_end(
              tcp->read_slices,
              tcp->read_slices->length -
                  static_cast<size_t>(info->bytes_transferred),
              &tcp->last_read_buffer);
        }
        GPR_ASSERT((size_t)info->bytes_transferred == tcp->read_slices->length);

        if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace) &&
            gpr_should_log(GPR_LOG_SEVERITY_INFO)) {
          size_t i;
          for (i = 0; i < tcp->read_slices->count; i++) {
            char* dump = grpc_dump_slice(tcp->read_slices->slices[i],
                                         GPR_DUMP_HEX | GPR_DUMP_ASCII);
            gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp,
                    tcp->peer_string.c_str(), dump);
            gpr_free(dump);
          }
        }
      } else {
        if (grpc_tcp_trace.enabled()) {
          gpr_log(GPR_INFO, "TCP:%p unref read_slice", tcp);
        }
        grpc_slice_buffer_reset_and_unref(tcp->read_slices);
        error = grpc_error_set_int(
            tcp->shutting_down
                ? GRPC_ERROR_CREATE_REFERENCING("TCP stream shutting down",
                                                &tcp->shutdown_error, 1)
                : GRPC_ERROR_CREATE("End of TCP stream"),
            grpc_core::StatusIntProperty::kRpcStatus, GRPC_STATUS_UNAVAILABLE);
      }
    }
  }

  tcp->read_cb = NULL;
  TCP_UNREF(tcp, "read");
  grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, error);
}

#define DEFAULT_TARGET_READ_SIZE 8192
#define MAX_WSABUF_COUNT 16
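// Initiates a read: tries a synchronous, non-blocking WSARecv first and falls
// back to an overlapped (asynchronous) read if the call would block.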
static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
                     grpc_closure* cb, bool /* urgent */,
                     int /* min_progress_size */) {
  grpc_tcp* tcp = (grpc_tcp*)ep;
  grpc_winsocket* handle = tcp->socket;
  grpc_winsocket_callback_info* info = &handle->read_info;
  int status;
  DWORD bytes_read = 0;
  DWORD flags = 0;
  WSABUF buffers[MAX_WSABUF_COUNT];
  size_t i;

  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "TCP:%p win_read", tcp);
  }

  if (tcp->shutting_down) {
    grpc_core::ExecCtx::Run(
        DEBUG_LOCATION, cb,
        grpc_error_set_int(
            GRPC_ERROR_CREATE_REFERENCING("TCP socket is shutting down",
                                          &tcp->shutdown_error, 1),
            grpc_core::StatusIntProperty::kRpcStatus, GRPC_STATUS_UNAVAILABLE));
    return;
  }

  tcp->read_cb = cb;
  tcp->read_slices = read_slices;
  grpc_slice_buffer_reset_and_unref(read_slices);
  grpc_slice_buffer_swap(read_slices, &tcp->last_read_buffer);

  if (tcp->read_slices->length < DEFAULT_TARGET_READ_SIZE / 2 &&
      tcp->read_slices->count < MAX_WSABUF_COUNT) {
    // TODO(jtattermusch): slice should be allocated using resource quota
    grpc_slice_buffer_add(tcp->read_slices,
                          GRPC_SLICE_MALLOC(DEFAULT_TARGET_READ_SIZE));
  }

  GPR_ASSERT(tcp->read_slices->count <= MAX_WSABUF_COUNT);
  for (i = 0; i < tcp->read_slices->count; i++) {
    buffers[i].len = (ULONG)GRPC_SLICE_LENGTH(
        tcp->read_slices->slices[i]);  // we know slice size fits in 32bit.
    buffers[i].buf = (char*)GRPC_SLICE_START_PTR(tcp->read_slices->slices[i]);
  }

  TCP_REF(tcp, "read");

  // First let's try a synchronous, non-blocking read.
  status = WSARecv(tcp->socket->socket, buffers, (DWORD)tcp->read_slices->count,
                   &bytes_read, &flags, NULL, NULL);
  info->wsa_error = status == 0 ? 0 : WSAGetLastError();

  // Did we get data immediately? If so, deliver it right away.
  if (info->wsa_error != WSAEWOULDBLOCK) {
    info->bytes_transferred = bytes_read;
    grpc_core::ExecCtx::Run(DEBUG_LOCATION, &tcp->on_read, absl::OkStatus());
    return;
  }

  // Otherwise, retry by queuing an overlapped read.
  memset(&tcp->socket->read_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSARecv(tcp->socket->socket, buffers, (DWORD)tcp->read_slices->count,
                   &bytes_read, &flags, &info->overlapped, NULL);

  if (status != 0) {
    int wsa_error = WSAGetLastError();
    if (wsa_error != WSA_IO_PENDING) {
      info->wsa_error = wsa_error;
      grpc_core::ExecCtx::Run(DEBUG_LOCATION, &tcp->on_read,
                              GRPC_WSA_ERROR(info->wsa_error, "WSARecv"));
      return;
    }
  }

  grpc_socket_notify_on_read(tcp->socket, &tcp->on_read);
}

// Asynchronous callback from the IOCP, or the background thread.
static void on_write(void* tcpp, grpc_error_handle error) {
  grpc_tcp* tcp = (grpc_tcp*)tcpp;
  grpc_winsocket* handle = tcp->socket;
  grpc_winsocket_callback_info* info = &handle->write_info;
  grpc_closure* cb;

  if (grpc_tcp_trace.enabled()) {
    gpr_log(GPR_INFO, "TCP:%p on_write", tcp);
  }

  gpr_mu_lock(&tcp->mu);
  cb = tcp->write_cb;
  tcp->write_cb = NULL;
  gpr_mu_unlock(&tcp->mu);

  if (error.ok()) {
    if (info->wsa_error != 0) {
      error = GRPC_WSA_ERROR(info->wsa_error, "WSASend");
    } else {
      GPR_ASSERT(info->bytes_transferred <= tcp->write_slices->length);
    }
  }

  TCP_UNREF(tcp, "write");
  grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, error);
}

// Initiates a write. Tries a synchronous, non-blocking WSASend first; any
// remainder is sent with an overlapped (asynchronous) write.
static void win_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
                      grpc_closure* cb, void* /* arg */,
                      int /* max_frame_size */) {
  grpc_tcp* tcp = (grpc_tcp*)ep;
  grpc_winsocket* socket = tcp->socket;
  grpc_winsocket_callback_info* info = &socket->write_info;
  unsigned i;
  DWORD bytes_sent;
  int status;
  WSABUF local_buffers[MAX_WSABUF_COUNT];
  WSABUF* allocated = NULL;
  WSABUF* buffers = local_buffers;
  size_t len, async_buffers_offset = 0;

  if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace) &&
      gpr_should_log(GPR_LOG_SEVERITY_INFO)) {
    size_t i;
    for (i = 0; i < slices->count; i++) {
      char* data =
          grpc_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
      gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string.c_str(),
              data);
      gpr_free(data);
    }
  }

  if (tcp->shutting_down) {
    grpc_core::ExecCtx::Run(
        DEBUG_LOCATION, cb,
        grpc_error_set_int(
            GRPC_ERROR_CREATE_REFERENCING("TCP socket is shutting down",
                                          &tcp->shutdown_error, 1),
            grpc_core::StatusIntProperty::kRpcStatus, GRPC_STATUS_UNAVAILABLE));
    return;
  }

  tcp->write_cb = cb;
  tcp->write_slices = slices;
  GPR_ASSERT(tcp->write_slices->count <= UINT_MAX);
  if (tcp->write_slices->count > GPR_ARRAY_SIZE(local_buffers)) {
    buffers = (WSABUF*)gpr_malloc(sizeof(WSABUF) * tcp->write_slices->count);
    allocated = buffers;
  }

  for (i = 0; i < tcp->write_slices->count; i++) {
    len = GRPC_SLICE_LENGTH(tcp->write_slices->slices[i]);
    GPR_ASSERT(len <= ULONG_MAX);
    buffers[i].len = (ULONG)len;
    buffers[i].buf = (char*)GRPC_SLICE_START_PTR(tcp->write_slices->slices[i]);
  }

  // First, let's try a synchronous, non-blocking write.
  status = WSASend(socket->socket, buffers, (DWORD)tcp->write_slices->count,
                   &bytes_sent, 0, NULL, NULL);

  if (status == 0) {
    if (bytes_sent == tcp->write_slices->length) {
      info->wsa_error = 0;
      grpc_error_handle error = absl::OkStatus();
      grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, error);
      if (allocated) gpr_free(allocated);
      return;
    }

    // The data was only partially delivered; send the rest with an
    // asynchronous write operation, skipping the bytes already sent.
    for (i = 0; i < tcp->write_slices->count; i++) {
      if (buffers[i].len > bytes_sent) {
        buffers[i].buf += bytes_sent;
        buffers[i].len -= bytes_sent;
        break;
      }
      bytes_sent -= buffers[i].len;
      async_buffers_offset++;
    }
  } else {
    info->wsa_error = WSAGetLastError();

    // We would usually expect WSAEWOULDBLOCK here, especially on a busy
    // connection whose send queue is full. Any other error is fatal for this
    // write, so report it immediately instead of queuing an async operation.
    if (info->wsa_error != WSAEWOULDBLOCK) {
      grpc_error_handle error = GRPC_WSA_ERROR(info->wsa_error, "WSASend");
      grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, error);
      if (allocated) gpr_free(allocated);
      return;
    }
  }

  TCP_REF(tcp, "write");

  // If we got a WSAEWOULDBLOCK earlier, or only part of the data was sent,
  // redo the remaining operation, this time asynchronously.
  memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSASend(socket->socket, buffers + async_buffers_offset,
                   (DWORD)(tcp->write_slices->count - async_buffers_offset),
                   NULL, 0, &socket->write_info.overlapped, NULL);
  if (allocated) gpr_free(allocated);

  if (status != 0) {
    int wsa_error = WSAGetLastError();
    if (wsa_error != WSA_IO_PENDING) {
      TCP_UNREF(tcp, "write");
      grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb,
                              GRPC_WSA_ERROR(wsa_error, "WSASend"));
      return;
    }
  }

  // Everything is now set up; ask the IOCP for the notification. The callback
  // may fire immediately, which is fine.
  grpc_socket_notify_on_write(socket, &tcp->on_write);
}

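// Polling on Windows is driven entirely by the IOCP: adding an endpoint to a
// pollset (or pollset_set) just ensures its socket is registered with the
// IOCP; removal is a no-op.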
static void win_add_to_pollset(grpc_endpoint* ep, grpc_pollset* ps) {
  grpc_tcp* tcp;
  (void)ps;
  tcp = (grpc_tcp*)ep;
  grpc_iocp_add_socket(tcp->socket);
}

static void win_add_to_pollset_set(grpc_endpoint* ep, grpc_pollset_set* pss) {
  grpc_tcp* tcp;
  (void)pss;
  tcp = (grpc_tcp*)ep;
  grpc_iocp_add_socket(tcp->socket);
}

static void win_delete_from_pollset_set(grpc_endpoint* /* ep */,
                                        grpc_pollset_set* /* pss */) {}

// Initiates a shutdown of the TCP endpoint. This will queue abort callbacks
// for the potential read and write operations. It is up to the caller to
// guarantee this isn't called in parallel to a read or write request, so
// we're not going to protect against these. However, the IO Completion Port
// callback will happen from another thread, so we need to protect against
// concurrent access of the data structure in that regard.
static void win_shutdown(grpc_endpoint* ep, grpc_error_handle why) {
  grpc_tcp* tcp = (grpc_tcp*)ep;
  gpr_mu_lock(&tcp->mu);
  // At this point we may already be inside the IOCP callback. See the
  // comments in on_read and on_write.
  if (!tcp->shutting_down) {
    tcp->shutting_down = 1;
    tcp->shutdown_error = why;
  }
  grpc_winsocket_shutdown(tcp->socket);
  gpr_mu_unlock(&tcp->mu);
}

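// Drops the initial reference; the endpoint is actually freed once any
// in-flight read/write operations have released their references.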
static void win_destroy(grpc_endpoint* ep) {
  grpc_tcp* tcp = (grpc_tcp*)ep;
  grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
  TCP_UNREF(tcp, "destroy");
}

static absl::string_view win_get_peer(grpc_endpoint* ep) {
  grpc_tcp* tcp = (grpc_tcp*)ep;
  return tcp->peer_string;
}

static absl::string_view win_get_local_address(grpc_endpoint* ep) {
  grpc_tcp* tcp = (grpc_tcp*)ep;
  return tcp->local_address;
}

static int win_get_fd(grpc_endpoint* /* ep */) { return -1; }

static bool win_can_track_err(grpc_endpoint* /* ep */) { return false; }

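// Wires the Windows implementation into the generic grpc_endpoint interface.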
static grpc_endpoint_vtable vtable = {win_read,
                                      win_write,
                                      win_add_to_pollset,
                                      win_add_to_pollset_set,
                                      win_delete_from_pollset_set,
                                      win_shutdown,
                                      win_destroy,
                                      win_get_peer,
                                      win_get_local_address,
                                      win_get_fd,
                                      win_can_track_err};

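// Wraps an already-connected winsocket in a grpc_endpoint, recording the peer
// string and looking up the local address via getsockname().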
grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket,
                               absl::string_view peer_string) {
  // TODO(jtattermusch): C++ize grpc_tcp and its dependencies (i.e. add
  // constructors) to ensure proper initialization
  grpc_tcp* tcp = new grpc_tcp{};
  tcp->base.vtable = &vtable;
  tcp->socket = socket;
  gpr_mu_init(&tcp->mu);
  gpr_ref_init(&tcp->refcount, 1);
  GRPC_CLOSURE_INIT(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&tcp->on_write, on_write, tcp, grpc_schedule_on_exec_ctx);
  grpc_resolved_address resolved_local_addr;
  resolved_local_addr.len = sizeof(resolved_local_addr.addr);
  absl::StatusOr<std::string> addr_uri;
  if (getsockname(tcp->socket->socket,
                  reinterpret_cast<sockaddr*>(resolved_local_addr.addr),
                  &resolved_local_addr.len) < 0 ||
      !(addr_uri = grpc_sockaddr_to_uri(&resolved_local_addr)).ok()) {
    tcp->local_address = "";
  } else {
    tcp->local_address = addr_uri.value();
  }
  tcp->peer_string = std::string(peer_string);
  grpc_slice_buffer_init(&tcp->last_read_buffer);
  return &tcp->base;
}

#endif  // GRPC_WINSOCK_SOCKET