// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/socket/transport_client_socket_pool.h"

#include <utility>

#include "base/auto_reset.h"
#include "base/barrier_closure.h"
#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/containers/contains.h"
#include "base/format_macros.h"
#include "base/functional/bind.h"
#include "base/functional/callback_helpers.h"
#include "base/location.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
#include "base/notreached.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_util.h"
#include "base/task/single_thread_task_runner.h"
#include "base/time/time.h"
#include "base/values.h"
#include "net/base/host_port_pair.h"
#include "net/base/net_errors.h"
#include "net/base/proxy_server.h"
#include "net/log/net_log.h"
#include "net/log/net_log_event_type.h"
#include "net/log/net_log_source.h"
#include "net/socket/connect_job_factory.h"
#include "net/traffic_annotation/network_traffic_annotation.h"
#include "url/gurl.h"

namespace net {

namespace {

// Indicates whether we should establish a new transport layer connection
// after a certain timeout has passed without receiving an ACK.
bool g_connect_backup_jobs_enabled = true;
base::Value::Dict NetLogCreateConnectJobParams(
    bool backup_job,
    const ClientSocketPool::GroupId* group_id) {
  base::Value::Dict dict;
  dict.Set("backup_job", backup_job);
  dict.Set("group_id", group_id->ToString());
  return dict;
}

}  // namespace

const char TransportClientSocketPool::kCertDatabaseChanged[] =
    "Cert database changed";
const char TransportClientSocketPool::kCertVerifierChanged[] =
    "Cert verifier changed";
const char TransportClientSocketPool::kClosedConnectionReturnedToPool[] =
    "Connection was closed when it was returned to the pool";
const char TransportClientSocketPool::kDataReceivedUnexpectedly[] =
    "Data received unexpectedly";
const char TransportClientSocketPool::kIdleTimeLimitExpired[] =
    "Idle time limit expired";
const char TransportClientSocketPool::kNetworkChanged[] = "Network changed";
const char TransportClientSocketPool::kRemoteSideClosedConnection[] =
    "Remote side closed connection";
const char TransportClientSocketPool::kSocketGenerationOutOfDate[] =
    "Socket generation out of date";
const char TransportClientSocketPool::kSocketPoolDestroyed[] =
    "Socket pool destroyed";
const char TransportClientSocketPool::kSslConfigChanged[] =
    "SSL configuration changed";

TransportClientSocketPool::Request::Request(
    ClientSocketHandle* handle,
    CompletionOnceCallback callback,
    const ProxyAuthCallback& proxy_auth_callback,
    RequestPriority priority,
    const SocketTag& socket_tag,
    RespectLimits respect_limits,
    Flags flags,
    scoped_refptr<SocketParams> socket_params,
    const absl::optional<NetworkTrafficAnnotationTag>& proxy_annotation_tag,
    const NetLogWithSource& net_log)
    : handle_(handle),
      callback_(std::move(callback)),
      proxy_auth_callback_(proxy_auth_callback),
      priority_(priority),
      respect_limits_(respect_limits),
      flags_(flags),
      socket_params_(std::move(socket_params)),
      proxy_annotation_tag_(proxy_annotation_tag),
      net_log_(net_log),
      socket_tag_(socket_tag) {
  if (respect_limits_ == ClientSocketPool::RespectLimits::DISABLED)
    DCHECK_EQ(priority_, MAXIMUM_PRIORITY);
}

TransportClientSocketPool::Request::~Request() = default;

void TransportClientSocketPool::Request::AssignJob(ConnectJob* job) {
  DCHECK(job);
  DCHECK(!job_);
  job_ = job;
  if (job_->priority() != priority_)
    job_->ChangePriority(priority_);
}

ConnectJob* TransportClientSocketPool::Request::ReleaseJob() {
  DCHECK(job_);
  ConnectJob* job = job_;
  job_ = nullptr;
  return job;
}

struct TransportClientSocketPool::IdleSocket {
  // An idle socket can't be used if it is disconnected or has been used
  // before and has received data unexpectedly (hence no longer idle). The
  // unread data would be mistaken for the beginning of the next response if
  // we were to use the socket for a new request.
  //
  // Note that a socket that has never been used before (like a preconnected
  // socket) may be used even with unread data. This may be, e.g., a SPDY
  // SETTINGS frame.
  //
  // If the socket is not usable, |net_log_reason_utf8| is set to a string
  // indicating why the socket is not usable.
  bool IsUsable(const char** net_log_reason_utf8) const;

  std::unique_ptr<StreamSocket> socket;
  base::TimeTicks start_time;
};

TransportClientSocketPool::TransportClientSocketPool(
    int max_sockets,
    int max_sockets_per_group,
    base::TimeDelta unused_idle_socket_timeout,
    const ProxyServer& proxy_server,
    bool is_for_websockets,
    const CommonConnectJobParams* common_connect_job_params,
    bool cleanup_on_ip_address_change)
    : TransportClientSocketPool(max_sockets,
                                max_sockets_per_group,
                                unused_idle_socket_timeout,
                                ClientSocketPool::used_idle_socket_timeout(),
                                proxy_server,
                                is_for_websockets,
                                common_connect_job_params,
                                cleanup_on_ip_address_change,
                                std::make_unique<ConnectJobFactory>(),
                                common_connect_job_params->ssl_client_context,
                                true /* connect_backup_jobs_enabled */) {}

TransportClientSocketPool::~TransportClientSocketPool() {
  // Clean up any idle sockets and pending connect jobs. Assert that we have no
  // remaining active sockets or pending requests. They should have all been
  // cleaned up prior to |this| being destroyed.
  FlushWithError(ERR_ABORTED, kSocketPoolDestroyed);
  DCHECK(group_map_.empty());
  DCHECK(pending_callback_map_.empty());
  DCHECK_EQ(0, connecting_socket_count_);
  DCHECK_EQ(0, handed_out_socket_count_);
  CHECK(higher_pools_.empty());

  if (ssl_client_context_)
    ssl_client_context_->RemoveObserver(this);

  if (cleanup_on_ip_address_change_)
    NetworkChangeNotifier::RemoveIPAddressObserver(this);
}

std::unique_ptr<TransportClientSocketPool>
TransportClientSocketPool::CreateForTesting(
    int max_sockets,
    int max_sockets_per_group,
    base::TimeDelta unused_idle_socket_timeout,
    base::TimeDelta used_idle_socket_timeout,
    const ProxyServer& proxy_server,
    bool is_for_websockets,
    const CommonConnectJobParams* common_connect_job_params,
    std::unique_ptr<ConnectJobFactory> connect_job_factory,
    SSLClientContext* ssl_client_context,
    bool connect_backup_jobs_enabled) {
  return base::WrapUnique<TransportClientSocketPool>(
      new TransportClientSocketPool(
          max_sockets, max_sockets_per_group, unused_idle_socket_timeout,
          used_idle_socket_timeout, proxy_server, is_for_websockets,
          common_connect_job_params, true /* cleanup_on_ip_address_change */,
          std::move(connect_job_factory), ssl_client_context,
          connect_backup_jobs_enabled));
}

TransportClientSocketPool::CallbackResultPair::CallbackResultPair()
    : result(OK) {}

TransportClientSocketPool::CallbackResultPair::CallbackResultPair(
    CompletionOnceCallback callback_in,
    int result_in)
    : callback(std::move(callback_in)), result(result_in) {}

TransportClientSocketPool::CallbackResultPair::CallbackResultPair(
    TransportClientSocketPool::CallbackResultPair&& other) = default;

TransportClientSocketPool::CallbackResultPair&
TransportClientSocketPool::CallbackResultPair::operator=(
    TransportClientSocketPool::CallbackResultPair&& other) = default;

TransportClientSocketPool::CallbackResultPair::~CallbackResultPair() = default;

bool TransportClientSocketPool::IsStalled() const {
  // If fewer than |max_sockets_| are in use, then clearly |this| is not
  // stalled.
  if ((handed_out_socket_count_ + connecting_socket_count_) < max_sockets_)
    return false;
  // So in order to be stalled, |this| must be using at least |max_sockets_| AND
  // |this| must have a request that is actually stalled on the global socket
  // limit. To find such a request, look for a group that has more requests
  // than jobs AND where the number of sockets is less than
  // |max_sockets_per_group_|. (If the number of sockets is equal to
  // |max_sockets_per_group_|, then the request is stalled on the group limit,
  // which does not count.)
  for (const auto& it : group_map_) {
    if (it.second->CanUseAdditionalSocketSlot(max_sockets_per_group_))
      return true;
  }
  return false;
}

void TransportClientSocketPool::AddHigherLayeredPool(
    HigherLayeredPool* higher_pool) {
  CHECK(higher_pool);
  CHECK(!base::Contains(higher_pools_, higher_pool));
  higher_pools_.insert(higher_pool);
}

void TransportClientSocketPool::RemoveHigherLayeredPool(
    HigherLayeredPool* higher_pool) {
  CHECK(higher_pool);
  CHECK(base::Contains(higher_pools_, higher_pool));
  higher_pools_.erase(higher_pool);
}

int TransportClientSocketPool::RequestSocket(
    const GroupId& group_id,
    scoped_refptr<SocketParams> params,
    const absl::optional<NetworkTrafficAnnotationTag>& proxy_annotation_tag,
    RequestPriority priority,
    const SocketTag& socket_tag,
    RespectLimits respect_limits,
    ClientSocketHandle* handle,
    CompletionOnceCallback callback,
    const ProxyAuthCallback& proxy_auth_callback,
    const NetLogWithSource& net_log) {
  CHECK(callback);
  CHECK(handle);

  NetLogTcpClientSocketPoolRequestedSocket(net_log, group_id);

  std::unique_ptr<Request> request = std::make_unique<Request>(
      handle, std::move(callback), proxy_auth_callback, priority, socket_tag,
      respect_limits, NORMAL, std::move(params), proxy_annotation_tag,
      net_log);

  // Clean up any timed-out idle sockets.
  CleanupIdleSockets(false, nullptr /* net_log_reason_utf8 */);

  request->net_log().BeginEvent(NetLogEventType::SOCKET_POOL);

  int rv =
      RequestSocketInternal(group_id, *request,
                            /*preconnect_done_closure=*/base::OnceClosure());
  if (rv != ERR_IO_PENDING) {
    if (rv == OK) {
      request->handle()->socket()->ApplySocketTag(request->socket_tag());
    }
    request->net_log().EndEventWithNetErrorCode(NetLogEventType::SOCKET_POOL,
                                                rv);
    CHECK(!request->handle()->is_initialized());
    request.reset();
  } else {
    Group* group = GetOrCreateGroup(group_id);
    group->InsertUnboundRequest(std::move(request));
    // Have to do this asynchronously, as closing sockets in higher level pools
    // calls back into |this|, which will cause all sorts of fun and exciting
    // re-entrancy issues if the socket pool is doing something else at the
    // time.
    if (group->CanUseAdditionalSocketSlot(max_sockets_per_group_)) {
      base::SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
          FROM_HERE,
          base::BindOnce(
              &TransportClientSocketPool::TryToCloseSocketsInLayeredPools,
              weak_factory_.GetWeakPtr()));
    }
  }
  return rv;
}

int TransportClientSocketPool::RequestSockets(
    const GroupId& group_id,
    scoped_refptr<SocketParams> params,
    const absl::optional<NetworkTrafficAnnotationTag>& proxy_annotation_tag,
    int num_sockets,
    CompletionOnceCallback callback,
    const NetLogWithSource& net_log) {
  // TODO(eroman): Split out the host and port parameters.
  net_log.AddEvent(NetLogEventType::TCP_CLIENT_SOCKET_POOL_REQUESTED_SOCKETS,
                   [&] { return NetLogGroupIdParams(group_id); });

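  // Build a preconnect-style Request: a null handle marks it as a preconnect,
  // and NO_IDLE_SOCKETS makes RequestSocketInternal() skip idle-socket reuse
  // and create fresh connections.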
  Request request(nullptr /* no handle */, CompletionOnceCallback(),
                  ProxyAuthCallback(), IDLE, SocketTag(),
                  RespectLimits::ENABLED, NO_IDLE_SOCKETS, std::move(params),
                  proxy_annotation_tag, net_log);

  // Clean up any timed-out idle sockets.
  CleanupIdleSockets(false, nullptr /* net_log_reason_utf8 */);

  if (num_sockets > max_sockets_per_group_) {
    num_sockets = max_sockets_per_group_;
  }

  request.net_log().BeginEventWithIntParams(
      NetLogEventType::SOCKET_POOL_CONNECTING_N_SOCKETS, "num_sockets",
      num_sockets);

  Group* group = GetOrCreateGroup(group_id);

  // RequestSocketsInternal() may delete the group.
  bool deleted_group = false;

  int rv = OK;

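  // |preconnect_done_closure| must run |num_sockets| times before the
  // caller's |callback| is posted with OK; each ConnectJob completion (or
  // slot that needs no new job) accounts for one run.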
  base::RepeatingClosure preconnect_done_closure = base::BarrierClosure(
      num_sockets,
      base::BindOnce(
          [](CompletionOnceCallback callback) {
            base::SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
                FROM_HERE, base::BindOnce(std::move(callback), OK));
          },
          std::move(callback)));
  int pending_connect_job_count = 0;
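  // Start jobs until the group has |num_sockets| active slots (sockets that
  // are handed out, idle, or still connecting all count), attempting at most
  // |num_sockets| times.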
  for (int num_iterations_left = num_sockets;
       group->NumActiveSocketSlots() < num_sockets && num_iterations_left > 0;
       num_iterations_left--) {
    rv = RequestSocketInternal(group_id, request, preconnect_done_closure);
    if (rv == ERR_IO_PENDING) {
      ++pending_connect_job_count;
    }
    if (rv < 0 && rv != ERR_IO_PENDING) {
      // We're encountering a synchronous error. Give up.
      if (!base::Contains(group_map_, group_id))
        deleted_group = true;
      break;
    }
    if (!base::Contains(group_map_, group_id)) {
      // Unexpected. The group should only be getting deleted on synchronous
      // error.
      NOTREACHED();
      deleted_group = true;
      break;
    }
  }

  if (!deleted_group && group->IsEmpty())
    RemoveGroup(group_id);

  if (rv == ERR_IO_PENDING)
    rv = OK;
  request.net_log().EndEventWithNetErrorCode(
      NetLogEventType::SOCKET_POOL_CONNECTING_N_SOCKETS, rv);

  // Currently we don't handle preconnect errors, so this method returns OK
  // even if it failed to preconnect.
  // TODO(crbug.com/1330235): Consider supporting error handling when needed.
  if (pending_connect_job_count == 0)
    return OK;
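  // Run the barrier closure once for every requested socket that did not
  // produce a pending ConnectJob (slots that were already filled or connects
  // that completed synchronously), so the barrier only waits on in-flight
  // jobs.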
  for (int i = 0; i < num_sockets - pending_connect_job_count; ++i) {
    preconnect_done_closure.Run();
  }

  return ERR_IO_PENDING;
}

int TransportClientSocketPool::RequestSocketInternal(
    const GroupId& group_id,
    const Request& request,
    base::OnceClosure preconnect_done_closure) {
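  // Guard against re-entrancy: RequestSocketInternal() must not be called
  // again while an earlier invocation is still on the stack.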
#if DCHECK_IS_ON()
  DCHECK(!request_in_process_);
  base::AutoReset<bool> auto_reset(&request_in_process_, true);
#endif  // DCHECK_IS_ON()

  ClientSocketHandle* const handle = request.handle();
  const bool preconnecting = !handle;
  DCHECK_EQ(preconnecting, !!preconnect_done_closure);

  Group* group = nullptr;
  auto group_it = group_map_.find(group_id);
  if (group_it != group_map_.end()) {
    group = group_it->second;

    if (!(request.flags() & NO_IDLE_SOCKETS)) {
      // Try to reuse a socket.
      if (AssignIdleSocketToRequest(request, group))
        return OK;
    }

    // If there are more ConnectJobs than pending requests, don't need to do
    // anything. Can just wait for the extra job to connect, and then assign it
    // to the request.
    if (!preconnecting && group->TryToUseNeverAssignedConnectJob())
      return ERR_IO_PENDING;

    // Can we make another active socket now?
    if (!group->HasAvailableSocketSlot(max_sockets_per_group_) &&
        request.respect_limits() == RespectLimits::ENABLED) {
      // TODO(willchan): Consider whether or not we need to close a socket in a
      // higher layered group. I don't think this makes sense since we would
      // just reuse that socket then if we needed one and wouldn't make it down
      // to this layer.
      request.net_log().AddEvent(
          NetLogEventType::SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP);
      return preconnecting ? ERR_PRECONNECT_MAX_SOCKET_LIMIT : ERR_IO_PENDING;
    }
  }

  if (ReachedMaxSocketsLimit() &&
      request.respect_limits() == RespectLimits::ENABLED) {
    // NOTE(mmenke): Wonder if we really need different code for each case
    // here. Only reason for them now seems to be preconnects.
    if (idle_socket_count_ > 0) {
      // There's an idle socket in this pool. Either that's because there's
      // still one in this group, but we got here due to preconnecting
      // bypassing idle sockets, or because there's an idle socket in another
      // group.
      bool closed = CloseOneIdleSocketExceptInGroup(group);
      if (preconnecting && !closed)
        return ERR_PRECONNECT_MAX_SOCKET_LIMIT;
    } else {
      // We could check if we really have a stalled group here, but it
      // requires a scan of all groups, so just flip a flag here, and do the
      // check later.
      request.net_log().AddEvent(
          NetLogEventType::SOCKET_POOL_STALLED_MAX_SOCKETS);
      return preconnecting ? ERR_PRECONNECT_MAX_SOCKET_LIMIT : ERR_IO_PENDING;
    }
  }

  // We couldn't find a socket to reuse, and there's space to allocate one,
  // so allocate and connect a new one.
  group = GetOrCreateGroup(group_id);
  std::unique_ptr<ConnectJob> connect_job(
      CreateConnectJob(group_id, request.socket_params(), proxy_server_,
                       request.proxy_annotation_tag(), request.priority(),
                       request.socket_tag(), group));
  connect_job->net_log().AddEvent(
      NetLogEventType::SOCKET_POOL_CONNECT_JOB_CREATED, [&] {
        return NetLogCreateConnectJobParams(false /* backup_job */, &group_id);
      });

  int rv = connect_job->Connect();
  if (rv == ERR_IO_PENDING) {
    if (preconnect_done_closure) {
      DCHECK(preconnecting);
      connect_job->set_done_closure(std::move(preconnect_done_closure));
    }
    // If we didn't have any sockets in this group, set a timer for potentially
    // creating a new one. If the SYN is lost, this backup socket may complete
    // before the slow socket, improving end user latency.
    if (connect_backup_jobs_enabled_ && group->IsEmpty())
      group->StartBackupJobTimer(group_id);
    group->AddJob(std::move(connect_job), preconnecting);
    connecting_socket_count_++;
    return rv;
  }

  LogBoundConnectJobToRequest(connect_job->net_log().source(), request);
  if (preconnecting) {
    if (rv == OK)
      AddIdleSocket(connect_job->PassSocket(), group);
  } else {
    DCHECK(handle);
    if (rv != OK)
      handle->SetAdditionalErrorState(connect_job.get());
    std::unique_ptr<StreamSocket> socket = connect_job->PassSocket();
    if (socket) {
      HandOutSocket(std::move(socket), ClientSocketHandle::UNUSED,
                    connect_job->connect_timing(), handle,
                    base::TimeDelta() /* idle_time */, group,
                    request.net_log());
    }
  }
  if (group->IsEmpty())
    RemoveGroup(group_id);

  return rv;
}

bool TransportClientSocketPool::AssignIdleSocketToRequest(
    const Request& request,
    Group* group) {
  std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets();
  auto idle_socket_it = idle_sockets->end();

  // Iterate through the idle sockets forwards (oldest to newest):
  //   * Delete any disconnected ones.
  //   * If we find a used idle socket, assign it to |idle_socket_it|. At the
  //     end, |idle_socket_it| will point to the newest used idle socket.
  for (auto it = idle_sockets->begin(); it != idle_sockets->end();) {
    // Check whether the socket is usable. Note that it's unlikely that the
    // socket is not usable because this function is always invoked after a
    // reusability check, but in theory the socket can be closed
    // asynchronously.
    const char* net_log_reason_utf8;
    if (!it->IsUsable(&net_log_reason_utf8)) {
      it->socket->NetLog().AddEventWithStringParams(
          NetLogEventType::SOCKET_POOL_CLOSING_SOCKET, "reason",
          net_log_reason_utf8);
      DecrementIdleCount();
      it = idle_sockets->erase(it);
      continue;
    }

    if (it->socket->WasEverUsed()) {
      // We found one we can reuse!
      idle_socket_it = it;
    }

    ++it;
  }

  // If we haven't found an idle socket, that means there are no used idle
  // sockets. Pick the oldest (first) idle socket (FIFO).

  if (idle_socket_it == idle_sockets->end() && !idle_sockets->empty())
    idle_socket_it = idle_sockets->begin();

  if (idle_socket_it != idle_sockets->end()) {
    DecrementIdleCount();
    base::TimeDelta idle_time =
        base::TimeTicks::Now() - idle_socket_it->start_time;
    std::unique_ptr<StreamSocket> socket = std::move(idle_socket_it->socket);
    idle_sockets->erase(idle_socket_it);
    // TODO(davidben): If |idle_time| is under some low watermark, consider
    // treating as UNUSED rather than UNUSED_IDLE. This will avoid
    // HttpNetworkTransaction retrying on some errors.
    ClientSocketHandle::SocketReuseType reuse_type =
        socket->WasEverUsed() ? ClientSocketHandle::REUSED_IDLE
                              : ClientSocketHandle::UNUSED_IDLE;

    HandOutSocket(std::move(socket), reuse_type,
                  LoadTimingInfo::ConnectTiming(), request.handle(), idle_time,
                  group, request.net_log());
    return true;
  }

  return false;
}

// static
void TransportClientSocketPool::LogBoundConnectJobToRequest(
    const NetLogSource& connect_job_source,
    const Request& request) {
  request.net_log().AddEventReferencingSource(
      NetLogEventType::SOCKET_POOL_BOUND_TO_CONNECT_JOB, connect_job_source);
}

void TransportClientSocketPool::SetPriority(const GroupId& group_id,
                                            ClientSocketHandle* handle,
                                            RequestPriority priority) {
  auto group_it = group_map_.find(group_id);
  if (group_it == group_map_.end()) {
    DCHECK(base::Contains(pending_callback_map_, handle));
    // The Request has already completed and been destroyed; nothing to
    // reprioritize.
    return;
  }

  group_it->second->SetPriority(handle, priority);
}

void TransportClientSocketPool::CancelRequest(const GroupId& group_id,
                                              ClientSocketHandle* handle,
                                              bool cancel_connect_job) {
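  // If the handle already has a queued completion callback, the request was
  // handed a socket (or an error) that hasn't been delivered yet: reclaim the
  // socket from the handle and either close it or return it to the pool.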
  auto callback_it = pending_callback_map_.find(handle);
  if (callback_it != pending_callback_map_.end()) {
    int result = callback_it->second.result;
    pending_callback_map_.erase(callback_it);
    std::unique_ptr<StreamSocket> socket = handle->PassSocket();
    if (socket) {
      if (result != OK) {
        socket->Disconnect();
      } else if (cancel_connect_job) {
        // Close the socket if |cancel_connect_job| is true and there are no
        // other pending requests.
        Group* group = GetOrCreateGroup(group_id);
        if (group->unbound_request_count() == 0)
          socket->Disconnect();
      }
      ReleaseSocket(handle->group_id(), std::move(socket),
                    handle->group_generation());
    }
    return;
  }

  CHECK(base::Contains(group_map_, group_id));
  Group* group = GetOrCreateGroup(group_id);

  std::unique_ptr<Request> request = group->FindAndRemoveBoundRequest(handle);
  if (request) {
    --connecting_socket_count_;
    OnAvailableSocketSlot(group_id, group);
    CheckForStalledSocketGroups();
    return;
  }

  // Search |unbound_requests_| for matching handle.
  request = group->FindAndRemoveUnboundRequest(handle);
  if (request) {
    request->net_log().AddEvent(NetLogEventType::CANCELLED);
    request->net_log().EndEvent(NetLogEventType::SOCKET_POOL);

    // Let the job run, unless |cancel_connect_job| is true, or we're at the
    // socket limit and there are no other requests waiting on the job.
    bool reached_limit = ReachedMaxSocketsLimit();
    if (group->jobs().size() > group->unbound_request_count() &&
        (cancel_connect_job || reached_limit)) {
      RemoveConnectJob(group->jobs().begin()->get(), group);
      if (group->IsEmpty())
        RemoveGroup(group->group_id());
      if (reached_limit)
        CheckForStalledSocketGroups();
    }
  }
}

void TransportClientSocketPool::CloseIdleSockets(
    const char* net_log_reason_utf8) {
  CleanupIdleSockets(true, net_log_reason_utf8);
  DCHECK_EQ(0, idle_socket_count_);
}

void TransportClientSocketPool::CloseIdleSocketsInGroup(
    const GroupId& group_id,
    const char* net_log_reason_utf8) {
  if (idle_socket_count_ == 0)
    return;
  auto it = group_map_.find(group_id);
  if (it == group_map_.end())
    return;
  CleanupIdleSocketsInGroup(true, it->second, base::TimeTicks::Now(),
                            net_log_reason_utf8);
  if (it->second->IsEmpty())
    RemoveGroup(it);
}

int TransportClientSocketPool::IdleSocketCount() const {
  return idle_socket_count_;
}

size_t TransportClientSocketPool::IdleSocketCountInGroup(
    const GroupId& group_id) const {
  auto i = group_map_.find(group_id);
  CHECK(i != group_map_.end());

  return i->second->idle_sockets().size();
}

LoadState TransportClientSocketPool::GetLoadState(
    const GroupId& group_id,
    const ClientSocketHandle* handle) const {
  if (base::Contains(pending_callback_map_, handle))
    return LOAD_STATE_CONNECTING;

  auto group_it = group_map_.find(group_id);
  if (group_it == group_map_.end()) {
    // TODO(mmenke): This is actually reached in the wild, for unknown reasons.
    // Would be great to understand why, and if it's a bug, fix it. If not,
    // should have a test for that case.
    NOTREACHED();
    return LOAD_STATE_IDLE;
  }

  const Group& group = *group_it->second;
  ConnectJob* job = group.GetConnectJobForHandle(handle);
  if (job)
    return job->GetLoadState();

  if (group.CanUseAdditionalSocketSlot(max_sockets_per_group_))
    return LOAD_STATE_WAITING_FOR_STALLED_SOCKET_POOL;
  return LOAD_STATE_WAITING_FOR_AVAILABLE_SOCKET;
}

base::Value TransportClientSocketPool::GetInfoAsValue(
    const std::string& name,
    const std::string& type) const {
  // TODO(mmenke): This currently doesn't return bound Requests or ConnectJobs.
  base::Value::Dict dict;
  dict.Set("name", name);
  dict.Set("type", type);
  dict.Set("handed_out_socket_count", handed_out_socket_count_);
  dict.Set("connecting_socket_count", connecting_socket_count_);
  dict.Set("idle_socket_count", idle_socket_count_);
  dict.Set("max_socket_count", max_sockets_);
  dict.Set("max_sockets_per_group", max_sockets_per_group_);

  if (group_map_.empty())
    return base::Value(std::move(dict));

  base::Value::Dict all_groups_dict;
  for (const auto& entry : group_map_) {
    const Group* group = entry.second;
    base::Value::Dict group_dict;

    group_dict.Set("pending_request_count",
                   static_cast<int>(group->unbound_request_count()));
    if (group->has_unbound_requests()) {
      group_dict.Set("top_pending_priority",
                     RequestPriorityToString(group->TopPendingPriority()));
    }

    group_dict.Set("active_socket_count", group->active_socket_count());

    base::Value::List idle_socket_list;
    for (const auto& idle_socket : group->idle_sockets()) {
      int source_id = idle_socket.socket->NetLog().source().id;
      idle_socket_list.Append(source_id);
    }
    group_dict.Set("idle_sockets", std::move(idle_socket_list));

    base::Value::List connect_jobs_list;
    for (const auto& job : group->jobs()) {
      int source_id = job->net_log().source().id;
      connect_jobs_list.Append(source_id);
    }
    group_dict.Set("connect_jobs", std::move(connect_jobs_list));

    group_dict.Set("is_stalled",
                   group->CanUseAdditionalSocketSlot(max_sockets_per_group_));
    group_dict.Set("backup_job_timer_is_running",
                   group->BackupJobTimerIsRunning());

    all_groups_dict.Set(entry.first.ToString(), std::move(group_dict));
  }
  dict.Set("groups", std::move(all_groups_dict));
  return base::Value(std::move(dict));
}

bool TransportClientSocketPool::HasActiveSocket(const GroupId& group_id) const {
  return HasGroup(group_id);
}

bool TransportClientSocketPool::IdleSocket::IsUsable(
    const char** net_log_reason_utf8) const {
  DCHECK(net_log_reason_utf8);
  if (socket->WasEverUsed()) {
    if (!socket->IsConnectedAndIdle()) {
      if (!socket->IsConnected()) {
        *net_log_reason_utf8 = kRemoteSideClosedConnection;
      } else {
        *net_log_reason_utf8 = kDataReceivedUnexpectedly;
      }
      return false;
    }
    return true;
  }

  if (!socket->IsConnected()) {
    *net_log_reason_utf8 = kRemoteSideClosedConnection;
    return false;
  }
  return true;
}

TransportClientSocketPool::TransportClientSocketPool(
    int max_sockets,
    int max_sockets_per_group,
    base::TimeDelta unused_idle_socket_timeout,
    base::TimeDelta used_idle_socket_timeout,
    const ProxyServer& proxy_server,
    bool is_for_websockets,
    const CommonConnectJobParams* common_connect_job_params,
    bool cleanup_on_ip_address_change,
    std::unique_ptr<ConnectJobFactory> connect_job_factory,
    SSLClientContext* ssl_client_context,
    bool connect_backup_jobs_enabled)
    : ClientSocketPool(is_for_websockets,
                       common_connect_job_params,
                       std::move(connect_job_factory)),
      max_sockets_(max_sockets),
      max_sockets_per_group_(max_sockets_per_group),
      unused_idle_socket_timeout_(unused_idle_socket_timeout),
      used_idle_socket_timeout_(used_idle_socket_timeout),
      proxy_server_(proxy_server),
      cleanup_on_ip_address_change_(cleanup_on_ip_address_change),
      connect_backup_jobs_enabled_(connect_backup_jobs_enabled &&
                                   g_connect_backup_jobs_enabled),
      ssl_client_context_(ssl_client_context) {
  DCHECK_LE(0, max_sockets_per_group);
  DCHECK_LE(max_sockets_per_group, max_sockets);

  if (cleanup_on_ip_address_change_)
    NetworkChangeNotifier::AddIPAddressObserver(this);

  if (ssl_client_context_)
    ssl_client_context_->AddObserver(this);
}

void TransportClientSocketPool::OnSSLConfigChanged(
    SSLClientContext::SSLConfigChangeType change_type) {
  const char* message = nullptr;
  // When the SSL config or cert verifier config changes, flush all idle
  // sockets so they won't get re-used, and allow any active sockets to finish,
  // but don't put them back in the socket pool.
  switch (change_type) {
    case SSLClientContext::SSLConfigChangeType::kSSLConfigChanged:
      message = kNetworkChanged;
      break;
    case SSLClientContext::SSLConfigChangeType::kCertDatabaseChanged:
      message = kCertDatabaseChanged;
      break;
    case SSLClientContext::SSLConfigChangeType::kCertVerifierChanged:
      message = kCertVerifierChanged;
      break;
  }

  base::TimeTicks now = base::TimeTicks::Now();
  for (auto it = group_map_.begin(); it != group_map_.end();) {
    it = RefreshGroup(it, now, message);
  }
  CheckForStalledSocketGroups();
}

// TODO(crbug.com/1206799): Get `server` as SchemeHostPort?
void TransportClientSocketPool::OnSSLConfigForServerChanged(
    const HostPortPair& server) {
  // Current time value. Retrieving it once at the function start rather than
  // inside the inner loop, since it shouldn't change by any meaningful amount.
  //
  // TODO(davidben): This value is not actually needed because
  // CleanupIdleSocketsInGroup() is called with |force| = true. Tidy up
  // interfaces so the parameter is not necessary.
  base::TimeTicks now = base::TimeTicks::Now();

  // If the proxy is |server| and uses SSL settings (HTTPS or QUIC), refresh
  // every group.
  bool proxy_matches = proxy_server_.is_http_like() &&
                       !proxy_server_.is_http() &&
                       proxy_server_.host_port_pair() == server;
  bool refreshed_any = false;
  for (auto it = group_map_.begin(); it != group_map_.end();) {
    if (proxy_matches ||
        (GURL::SchemeIsCryptographic(it->first.destination().scheme()) &&
         HostPortPair::FromSchemeHostPort(it->first.destination()) ==
             server)) {
      refreshed_any = true;
      // Note this call may destroy the group and invalidate |it|.
      it = RefreshGroup(it, now, kSslConfigChanged);
    } else {
      ++it;
    }
  }

  if (refreshed_any) {
    // Check to see if any group can use the freed up socket slots. It would be
    // more efficient to give the slots to the refreshed groups, if they still
    // exist and need them, but this should be rare enough that it doesn't
    // matter. This will also make sure the slots are given to the group with
    // the highest priority request without an assigned ConnectJob.
    CheckForStalledSocketGroups();
  }
}

bool TransportClientSocketPool::HasGroup(const GroupId& group_id) const {
  return base::Contains(group_map_, group_id);
}

void TransportClientSocketPool::CleanupIdleSockets(
    bool force,
    const char* net_log_reason_utf8) {
  if (idle_socket_count_ == 0)
    return;

  // Current time value. Retrieving it once at the function start rather than
  // inside the inner loop, since it shouldn't change by any meaningful amount.
  base::TimeTicks now = base::TimeTicks::Now();

  for (auto i = group_map_.begin(); i != group_map_.end();) {
    Group* group = i->second;
    CHECK(group);
    CleanupIdleSocketsInGroup(force, group, now, net_log_reason_utf8);
    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      i = RemoveGroup(i);
    } else {
      ++i;
    }
  }
}

bool TransportClientSocketPool::CloseOneIdleSocket() {
  if (idle_socket_count_ == 0)
    return false;
  return CloseOneIdleSocketExceptInGroup(nullptr);
}

bool TransportClientSocketPool::CloseOneIdleConnectionInHigherLayeredPool() {
  // This pool doesn't have any idle sockets. It's possible that a pool at a
  // higher layer is holding one of this pool's sockets active, even though
  // it's actually idle. Query the higher layers.
  for (auto* higher_pool : higher_pools_) {
    if (higher_pool->CloseOneIdleConnection())
      return true;
  }
  return false;
}

void TransportClientSocketPool::CleanupIdleSocketsInGroup(
    bool force,
    Group* group,
    const base::TimeTicks& now,
    const char* net_log_reason_utf8) {
  // If |force| is true, a reason must be provided.
  DCHECK(!force || net_log_reason_utf8);

  auto idle_socket_it = group->mutable_idle_sockets()->begin();
  while (idle_socket_it != group->idle_sockets().end()) {
    bool should_clean_up = force;
    const char* reason_for_closing_socket = net_log_reason_utf8;
    base::TimeDelta timeout = idle_socket_it->socket->WasEverUsed()
                                  ? used_idle_socket_timeout_
                                  : unused_idle_socket_timeout_;

    // Timeout errors take precedence over the reason for flushing sockets in
    // the group, if applicable.
    if (now - idle_socket_it->start_time >= timeout) {
      should_clean_up = true;
      reason_for_closing_socket = kIdleTimeLimitExpired;
    }

    // Usability errors take precedence over other errors.
    if (!idle_socket_it->IsUsable(&reason_for_closing_socket))
      should_clean_up = true;

    if (should_clean_up) {
      DCHECK(reason_for_closing_socket);
      idle_socket_it->socket->NetLog().AddEventWithStringParams(
          NetLogEventType::SOCKET_POOL_CLOSING_SOCKET, "reason",
          reason_for_closing_socket);
      idle_socket_it = group->mutable_idle_sockets()->erase(idle_socket_it);
      DecrementIdleCount();
    } else {
      DCHECK(!reason_for_closing_socket);
      ++idle_socket_it;
    }
  }
}

TransportClientSocketPool::Group* TransportClientSocketPool::GetOrCreateGroup(
    const GroupId& group_id) {
  auto it = group_map_.find(group_id);
  if (it != group_map_.end())
    return it->second;
  Group* group = new Group(group_id, this);
  group_map_[group_id] = group;
  return group;
}

void TransportClientSocketPool::RemoveGroup(const GroupId& group_id) {
  auto it = group_map_.find(group_id);
  CHECK(it != group_map_.end());

  RemoveGroup(it);
}

TransportClientSocketPool::GroupMap::iterator
TransportClientSocketPool::RemoveGroup(GroupMap::iterator it) {
  delete it->second;
  return group_map_.erase(it);
}

// static
bool TransportClientSocketPool::connect_backup_jobs_enabled() {
  return g_connect_backup_jobs_enabled;
}

// static
bool TransportClientSocketPool::set_connect_backup_jobs_enabled(bool enabled) {
  bool old_value = g_connect_backup_jobs_enabled;
  g_connect_backup_jobs_enabled = enabled;
  return old_value;
}

void TransportClientSocketPool::IncrementIdleCount() {
  ++idle_socket_count_;
}

void TransportClientSocketPool::DecrementIdleCount() {
  --idle_socket_count_;
}

void TransportClientSocketPool::ReleaseSocket(
    const GroupId& group_id,
    std::unique_ptr<StreamSocket> socket,
    int64_t group_generation) {
  auto i = group_map_.find(group_id);
  CHECK(i != group_map_.end());

  Group* group = i->second;
  CHECK(group);

  CHECK_GT(handed_out_socket_count_, 0);
  handed_out_socket_count_--;

  CHECK_GT(group->active_socket_count(), 0);
  group->DecrementActiveSocketCount();

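  // Decide whether the returned socket can go back on the idle list: it must
  // still be connected with no unread data, and it must belong to the group's
  // current generation (older generations have been flushed).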
  bool can_reuse_socket = false;
  base::StringPiece not_reusable_reason;
  if (!socket->IsConnectedAndIdle()) {
    if (!socket->IsConnected()) {
      not_reusable_reason = kClosedConnectionReturnedToPool;
    } else {
      not_reusable_reason = kDataReceivedUnexpectedly;
    }
  } else if (group_generation != group->generation()) {
    not_reusable_reason = kSocketGenerationOutOfDate;
  } else {
    can_reuse_socket = true;
  }

  if (can_reuse_socket) {
    DCHECK(not_reusable_reason.empty());

    // Add it to the idle list.
    AddIdleSocket(std::move(socket), group);
    OnAvailableSocketSlot(group_id, group);
  } else {
    DCHECK(!not_reusable_reason.empty());

    socket->NetLog().AddEventWithStringParams(
        NetLogEventType::SOCKET_POOL_CLOSING_SOCKET, "reason",
        not_reusable_reason);
    if (group->IsEmpty())
      RemoveGroup(i);
    socket.reset();
  }

  CheckForStalledSocketGroups();
}

void TransportClientSocketPool::CheckForStalledSocketGroups() {
  // Loop until there's nothing more to do.
  while (true) {
    // If we have idle sockets, see if we can give one to the top-stalled
    // group.
    GroupId top_group_id;
    Group* top_group = nullptr;
    if (!FindTopStalledGroup(&top_group, &top_group_id))
      return;

    if (ReachedMaxSocketsLimit()) {
      if (idle_socket_count_ > 0) {
        CloseOneIdleSocket();
      } else {
        // We can't activate more sockets since we're already at our global
        // limit.
        return;
      }
    }

    // Note that this may delete top_group.
    OnAvailableSocketSlot(top_group_id, top_group);
  }
}

// Search for the highest priority pending request, amongst the groups that
// are not at the |max_sockets_per_group_| limit. Note: for requests with
// the same priority, the winner is based on group hash ordering (and not
// insertion order).
bool TransportClientSocketPool::FindTopStalledGroup(Group** group,
                                                    GroupId* group_id) const {
  CHECK((group && group_id) || (!group && !group_id));
  Group* top_group = nullptr;
  const GroupId* top_group_id = nullptr;
  bool has_stalled_group = false;
  for (const auto& it : group_map_) {
    Group* curr_group = it.second;
    if (!curr_group->has_unbound_requests())
      continue;
    if (curr_group->CanUseAdditionalSocketSlot(max_sockets_per_group_)) {
      if (!group)
        return true;
      has_stalled_group = true;
      bool has_higher_priority =
          !top_group ||
          curr_group->TopPendingPriority() > top_group->TopPendingPriority();
      if (has_higher_priority) {
        top_group = curr_group;
        top_group_id = &it.first;
      }
    }
  }

  if (top_group) {
    CHECK(group);
    *group = top_group;
    *group_id = *top_group_id;
  } else {
    CHECK(!has_stalled_group);
  }
  return has_stalled_group;
}

void TransportClientSocketPool::OnIPAddressChanged() {
  DCHECK(cleanup_on_ip_address_change_);
  FlushWithError(ERR_NETWORK_CHANGED, kNetworkChanged);
}

void TransportClientSocketPool::FlushWithError(
    int error,
    const char* net_log_reason_utf8) {
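  // Fail all in-flight ConnectJobs and unbound requests, close idle sockets,
  // and bump each group's generation so that already-handed-out sockets are
  // closed rather than pooled when they are returned.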
  CancelAllConnectJobs();
  CloseIdleSockets(net_log_reason_utf8);
  CancelAllRequestsWithError(error);
  for (const auto& group : group_map_) {
    group.second->IncrementGeneration();
  }
}

void TransportClientSocketPool::RemoveConnectJob(ConnectJob* job,
                                                 Group* group) {
  CHECK_GT(connecting_socket_count_, 0);
  connecting_socket_count_--;

  DCHECK(group);
  group->RemoveUnboundJob(job);
}

void TransportClientSocketPool::OnAvailableSocketSlot(const GroupId& group_id,
                                                      Group* group) {
  DCHECK(base::Contains(group_map_, group_id));
  if (group->IsEmpty()) {
    RemoveGroup(group_id);
  } else if (group->has_unbound_requests()) {
    ProcessPendingRequest(group_id, group);
  }
}

void TransportClientSocketPool::ProcessPendingRequest(const GroupId& group_id,
                                                      Group* group) {
  const Request* next_request = group->GetNextUnboundRequest();
  DCHECK(next_request);

  // If the group has no idle sockets, and can't make use of an additional
  // slot, either because it's at the limit or because it's at the socket per
  // group limit, then there's nothing to do.
  if (group->idle_sockets().empty() &&
      !group->CanUseAdditionalSocketSlot(max_sockets_per_group_)) {
    return;
  }

  int rv =
      RequestSocketInternal(group_id, *next_request,
                            /*preconnect_done_closure=*/base::OnceClosure());
  if (rv != ERR_IO_PENDING) {
    std::unique_ptr<Request> request = group->PopNextUnboundRequest();
    DCHECK(request);
    if (group->IsEmpty())
      RemoveGroup(group_id);

    request->net_log().EndEventWithNetErrorCode(NetLogEventType::SOCKET_POOL,
                                                rv);
    InvokeUserCallbackLater(request->handle(), request->release_callback(), rv,
                            request->socket_tag());
  }
}

void TransportClientSocketPool::HandOutSocket(
    std::unique_ptr<StreamSocket> socket,
    ClientSocketHandle::SocketReuseType reuse_type,
    const LoadTimingInfo::ConnectTiming& connect_timing,
    ClientSocketHandle* handle,
    base::TimeDelta idle_time,
    Group* group,
    const NetLogWithSource& net_log) {
  DCHECK(socket);
  handle->SetSocket(std::move(socket));
  handle->set_reuse_type(reuse_type);
  handle->set_idle_time(idle_time);
  handle->set_group_generation(group->generation());
  handle->set_connect_timing(connect_timing);

  if (reuse_type == ClientSocketHandle::REUSED_IDLE) {
    net_log.AddEventWithIntParams(
        NetLogEventType::SOCKET_POOL_REUSED_AN_EXISTING_SOCKET, "idle_ms",
        static_cast<int>(idle_time.InMilliseconds()));
  }

  net_log.AddEventReferencingSource(
      NetLogEventType::SOCKET_POOL_BOUND_TO_SOCKET,
      handle->socket()->NetLog().source());

  handed_out_socket_count_++;
  group->IncrementActiveSocketCount();
}

void TransportClientSocketPool::AddIdleSocket(
    std::unique_ptr<StreamSocket> socket,
    Group* group) {
  DCHECK(socket);
  IdleSocket idle_socket;
  idle_socket.socket = std::move(socket);
  idle_socket.start_time = base::TimeTicks::Now();

  group->mutable_idle_sockets()->push_back(std::move(idle_socket));
  IncrementIdleCount();
}

void TransportClientSocketPool::CancelAllConnectJobs() {
  for (auto i = group_map_.begin(); i != group_map_.end();) {
    Group* group = i->second;
    CHECK(group);
    connecting_socket_count_ -= group->jobs().size();
    group->RemoveAllUnboundJobs();

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      i = RemoveGroup(i);
    } else {
      ++i;
    }
  }
}

void TransportClientSocketPool::CancelAllRequestsWithError(int error) {
  for (auto i = group_map_.begin(); i != group_map_.end();) {
    Group* group = i->second;
    CHECK(group);

    while (true) {
      std::unique_ptr<Request> request = group->PopNextUnboundRequest();
      if (!request)
        break;
      InvokeUserCallbackLater(request->handle(), request->release_callback(),
                              error, request->socket_tag());
    }

    // Mark bound connect jobs as needing to fail. Can't fail them immediately
    // because they may have access to objects owned by the ConnectJob, and
    // could access them if a user callback invocation is queued. It would also
    // result in the consumer handling two messages at once, which in general
    // isn't safe for a lot of code.
    group->SetPendingErrorForAllBoundRequests(error);

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
      i = RemoveGroup(i);
    } else {
      ++i;
    }
  }
}

bool TransportClientSocketPool::ReachedMaxSocketsLimit() const {
  // Each connecting socket will eventually connect and be handed out.
  int total =
      handed_out_socket_count_ + connecting_socket_count_ + idle_socket_count_;
  // There can be more sockets than the limit since some requests can ignore
  // the limit.
  if (total < max_sockets_)
    return false;
  return true;
}

bool TransportClientSocketPool::CloseOneIdleSocketExceptInGroup(
    const Group* exception_group) {
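  // Walk the groups in map order, closing the oldest idle socket in the first
  // group (other than |exception_group|) that has one.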
  CHECK_GT(idle_socket_count_, 0);

  for (auto i = group_map_.begin(); i != group_map_.end(); ++i) {
    Group* group = i->second;
    CHECK(group);
    if (exception_group == group)
      continue;
    std::list<IdleSocket>* idle_sockets = group->mutable_idle_sockets();

    if (!idle_sockets->empty()) {
      idle_sockets->pop_front();
      DecrementIdleCount();
      if (group->IsEmpty())
        RemoveGroup(i);

      return true;
    }
  }

  return false;
}

void TransportClientSocketPool::OnConnectJobComplete(Group* group,
                                                     int result,
                                                     ConnectJob* job) {
  DCHECK_NE(ERR_IO_PENDING, result);
  DCHECK(group_map_.find(group->group_id()) != group_map_.end());
  DCHECK_EQ(group, group_map_[group->group_id()]);
  DCHECK(result != OK || job->socket() != nullptr);

  // Check if the ConnectJob is already bound to a Request. If so, the result
  // is returned to that specific request.
  absl::optional<Group::BoundRequest> bound_request =
      group->FindAndRemoveBoundRequestForConnectJob(job);
  Request* request = nullptr;
  std::unique_ptr<Request> owned_request;
  if (bound_request) {
    --connecting_socket_count_;

    // If the socket pools were previously flushed with an error, return that
    // error to the bound request and discard the socket.
    if (bound_request->pending_error != OK) {
      InvokeUserCallbackLater(bound_request->request->handle(),
                              bound_request->request->release_callback(),
                              bound_request->pending_error,
                              bound_request->request->socket_tag());
      bound_request->request->net_log().EndEventWithNetErrorCode(
          NetLogEventType::SOCKET_POOL, bound_request->pending_error);
      OnAvailableSocketSlot(group->group_id(), group);
      CheckForStalledSocketGroups();
      return;
    }

    // If the ConnectJob is from a previous generation, add the request back to
    // the group, and kick off another request. The socket will be discarded.
    if (bound_request->generation != group->generation()) {
      group->InsertUnboundRequest(std::move(bound_request->request));
      OnAvailableSocketSlot(group->group_id(), group);
      CheckForStalledSocketGroups();
      return;
    }

    request = bound_request->request.get();
  } else {
    // In this case, RemoveConnectJob(job, _) must be called before exiting
    // this method. Otherwise, |job| will be leaked.
    owned_request = group->PopNextUnboundRequest();
    request = owned_request.get();

    if (!request) {
      if (result == OK)
        AddIdleSocket(job->PassSocket(), group);
      RemoveConnectJob(job, group);
      OnAvailableSocketSlot(group->group_id(), group);
      CheckForStalledSocketGroups();
      return;
    }

    LogBoundConnectJobToRequest(job->net_log().source(), *request);
  }

  // The case where there's no request is handled above.
  DCHECK(request);

  if (result != OK)
    request->handle()->SetAdditionalErrorState(job);
  if (job->socket()) {
    HandOutSocket(job->PassSocket(), ClientSocketHandle::UNUSED,
                  job->connect_timing(), request->handle(), base::TimeDelta(),
                  group, request->net_log());
  }
  request->net_log().EndEventWithNetErrorCode(NetLogEventType::SOCKET_POOL,
                                              result);
  InvokeUserCallbackLater(request->handle(), request->release_callback(),
                          result, request->socket_tag());
  if (!bound_request)
    RemoveConnectJob(job, group);
  // If no socket was handed out, there's a new socket slot available.
  if (!request->handle()->socket()) {
    OnAvailableSocketSlot(group->group_id(), group);
    CheckForStalledSocketGroups();
  }
}

void TransportClientSocketPool::OnNeedsProxyAuth(
    Group* group,
    const HttpResponseInfo& response,
    HttpAuthController* auth_controller,
    base::OnceClosure restart_with_auth_callback,
    ConnectJob* job) {
  DCHECK(group_map_.find(group->group_id()) != group_map_.end());
  DCHECK_EQ(group, group_map_[group->group_id()]);

  const Request* request = group->BindRequestToConnectJob(job);
  // If we can't bind the ConnectJob to a request, treat this as a ConnectJob
  // failure.
  if (!request) {
    OnConnectJobComplete(group, ERR_PROXY_AUTH_REQUESTED, job);
    return;
  }

  request->proxy_auth_callback().Run(response, auth_controller,
                                     std::move(restart_with_auth_callback));
}

void TransportClientSocketPool::InvokeUserCallbackLater(
    ClientSocketHandle* handle,
    CompletionOnceCallback callback,
    int rv,
    const SocketTag& socket_tag) {
  CHECK(!base::Contains(pending_callback_map_, handle));
  pending_callback_map_[handle] = CallbackResultPair(std::move(callback), rv);
  if (rv == OK) {
    handle->socket()->ApplySocketTag(socket_tag);
  }
  base::SingleThreadTaskRunner::GetCurrentDefault()->PostTask(
      FROM_HERE, base::BindOnce(&TransportClientSocketPool::InvokeUserCallback,
                                weak_factory_.GetWeakPtr(),
                                // This is safe as `handle` is checked against a
                                // map to verify it's alive before dereference.
                                // This code path must only be reachable by
                                // `handle`s that have had Init called.
                                base::UnsafeDangling(handle)));
}

void TransportClientSocketPool::InvokeUserCallback(
    MayBeDangling<ClientSocketHandle> handle) {
  auto it = pending_callback_map_.find(handle);

  // Exit if the request has already been cancelled.
  if (it == pending_callback_map_.end())
    return;

  CHECK(!handle->is_initialized());
  CompletionOnceCallback callback = std::move(it->second.callback);
  int result = it->second.result;
  pending_callback_map_.erase(it);
  std::move(callback).Run(result);
}

void TransportClientSocketPool::TryToCloseSocketsInLayeredPools() {
  while (IsStalled()) {
    // Closing a socket will result in calling back into |this| to use the
    // freed socket slot, so nothing else is needed.
    if (!CloseOneIdleConnectionInHigherLayeredPool())
      return;
  }
}

TransportClientSocketPool::GroupMap::iterator
TransportClientSocketPool::RefreshGroup(GroupMap::iterator it,
                                        const base::TimeTicks& now,
                                        const char* net_log_reason_utf8) {
  Group* group = it->second;
  CHECK(group);
  CleanupIdleSocketsInGroup(true /* force */, group, now, net_log_reason_utf8);

  connecting_socket_count_ -= group->jobs().size();
  group->RemoveAllUnboundJobs();

  // Prevent reuse of existing sockets.
  group->IncrementGeneration();

  // Delete the group if it's no longer needed.
  if (group->IsEmpty()) {
    return RemoveGroup(it);
  }
  return ++it;
}
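
// Editorial note on IncrementGeneration(): each Group tracks a generation
// number, and sockets handed out under an older generation are presumably
// not returned to the idle pool for reuse once the group has been
// refreshed; see the |generation| field recorded in BoundRequest below.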

TransportClientSocketPool::Group::Group(
    const GroupId& group_id,
    TransportClientSocketPool* client_socket_pool)
    : group_id_(group_id),
      client_socket_pool_(client_socket_pool),
      unbound_requests_(NUM_PRIORITIES) {}

TransportClientSocketPool::Group::~Group() {
  DCHECK_EQ(0u, never_assigned_job_count());
  DCHECK_EQ(0u, unassigned_job_count());
  DCHECK(unbound_requests_.empty());
  DCHECK(jobs_.empty());
  DCHECK(bound_requests_.empty());
}

void TransportClientSocketPool::Group::OnConnectJobComplete(int result,
                                                            ConnectJob* job) {
  DCHECK_NE(ERR_IO_PENDING, result);
  client_socket_pool_->OnConnectJobComplete(this, result, job);
}

void TransportClientSocketPool::Group::OnNeedsProxyAuth(
    const HttpResponseInfo& response,
    HttpAuthController* auth_controller,
    base::OnceClosure restart_with_auth_callback,
    ConnectJob* job) {
  client_socket_pool_->OnNeedsProxyAuth(this, response, auth_controller,
                                        std::move(restart_with_auth_callback),
                                        job);
}

void TransportClientSocketPool::Group::StartBackupJobTimer(
    const GroupId& group_id) {
  // Only allow one timer to run at a time.
  if (BackupJobTimerIsRunning())
    return;

  // Unretained here is okay because |backup_job_timer_| is
  // automatically cancelled when it's destroyed.
  backup_job_timer_.Start(FROM_HERE,
                          client_socket_pool_->ConnectRetryInterval(),
                          base::BindOnce(&Group::OnBackupJobTimerFired,
                                         base::Unretained(this), group_id));
}

bool TransportClientSocketPool::Group::BackupJobTimerIsRunning() const {
  return backup_job_timer_.IsRunning();
}

bool TransportClientSocketPool::Group::TryToUseNeverAssignedConnectJob() {
  SanityCheck();

  if (never_assigned_job_count_ == 0)
    return false;
  --never_assigned_job_count_;
  return true;
}

void TransportClientSocketPool::Group::AddJob(std::unique_ptr<ConnectJob> job,
                                              bool is_preconnect) {
  SanityCheck();

  if (is_preconnect)
    ++never_assigned_job_count_;
  jobs_.push_back(std::move(job));
  TryToAssignUnassignedJob(jobs_.back().get());

  SanityCheck();
}
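
// Editorial sketch of the preconnect bookkeeping: a job added with
// |is_preconnect| starts out "never assigned". A later request can consume
// such a job via TryToUseNeverAssignedConnectJob() above, which just
// decrements the counter, rather than creating a fresh ConnectJob of its
// own. AddJob() also immediately offers the new job to the first job-less
// unbound request, if any.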

std::unique_ptr<ConnectJob> TransportClientSocketPool::Group::RemoveUnboundJob(
    ConnectJob* job) {
  SanityCheck();

  // Check that |job| is in the list.
  auto it = base::ranges::find(jobs_, job, &std::unique_ptr<ConnectJob>::get);
  DCHECK(it != jobs_.end());

  // Check if |job| is in the unassigned jobs list. If so, remove it.
  auto it2 = base::ranges::find(unassigned_jobs_, job);
  if (it2 != unassigned_jobs_.end()) {
    unassigned_jobs_.erase(it2);
  } else {
    // Otherwise, |job| must be assigned to some Request. Unassign it, then
    // try to replace it with another job if possible (either by taking an
    // unassigned job or stealing from another request, if any requests after
    // it have a job).
    RequestQueue::Pointer request_with_job = FindUnboundRequestWithJob(job);
    DCHECK(!request_with_job.is_null());
    request_with_job.value()->ReleaseJob();
    TryToAssignJobToRequest(request_with_job);
  }
  std::unique_ptr<ConnectJob> owned_job = std::move(*it);
  jobs_.erase(it);

  size_t job_count = jobs_.size();
  if (job_count < never_assigned_job_count_)
    never_assigned_job_count_ = job_count;

  // If we've got no more jobs for this group, then we no longer need a
  // backup job either.
  if (jobs_.empty()) {
    DCHECK(unassigned_jobs_.empty());
    backup_job_timer_.Stop();
  }

  SanityCheck();
  return owned_job;
}
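
// Hypothetical example of the replacement logic in RemoveUnboundJob(): with
// requests R0 > R1 > R2 where R0 holds J0 and R1 holds J1, and
// |unassigned_jobs_| empty, removing J0 releases R0's assignment, and
// TryToAssignJobToRequest(R0) steals J1 from R1 (the last request with a
// job), leaving R0 -> J1 and R1 job-less. Had |unassigned_jobs_| been
// non-empty, R0 would simply have taken one of those instead.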

void TransportClientSocketPool::Group::OnBackupJobTimerFired(
    const GroupId& group_id) {
  // If there are no more jobs pending, there is no work to do.
  // If we've done our cleanups correctly, this should not happen.
  if (jobs_.empty()) {
    NOTREACHED();
    return;
  }

  // If the old job has already established a connection, don't start a backup
  // job. Backup jobs are only for issues establishing the initial TCP
  // connection - the timeout they use is tuned for that, and tests expect
  // that behavior.
  //
  // TODO(https://crbug.com/929814): Replace both this and the
  // LOAD_STATE_RESOLVING_HOST check with a callback. Use the
  // LOAD_STATE_RESOLVING_HOST callback to start the timer (and invoke the
  // OnHostResolved callback of any pending requests), and the
  // HasEstablishedConnection() callback to stop the timer. That should result
  // in a more robust, testable API.
  if ((*jobs_.begin())->HasEstablishedConnection())
    return;

  // If our old job is waiting on DNS, or if we can't create any sockets
  // right now due to limits, just reset the timer.
  if (client_socket_pool_->ReachedMaxSocketsLimit() ||
      !HasAvailableSocketSlot(client_socket_pool_->max_sockets_per_group_) ||
      (*jobs_.begin())->GetLoadState() == LOAD_STATE_RESOLVING_HOST) {
    StartBackupJobTimer(group_id);
    return;
  }

  if (unbound_requests_.empty())
    return;

  Request* request = unbound_requests_.FirstMax().value().get();
  std::unique_ptr<ConnectJob> owned_backup_job =
      client_socket_pool_->CreateConnectJob(
          group_id, request->socket_params(),
          client_socket_pool_->proxy_server_, request->proxy_annotation_tag(),
          request->priority(), request->socket_tag(), this);
  owned_backup_job->net_log().AddEvent(
      NetLogEventType::SOCKET_POOL_CONNECT_JOB_CREATED, [&] {
        return NetLogCreateConnectJobParams(true /* backup_job */, &group_id_);
      });
  ConnectJob* backup_job = owned_backup_job.get();
  AddJob(std::move(owned_backup_job), false);
  client_socket_pool_->connecting_socket_count_++;
  int rv = backup_job->Connect();
  if (rv != ERR_IO_PENDING) {
    client_socket_pool_->OnConnectJobComplete(this, rv, backup_job);
  }
}
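
// Illustrative backup-job timeline (timings are hypothetical):
//   t = 0:        first ConnectJob starts; StartBackupJobTimer() arms the
//                 timer for ConnectRetryInterval().
//   t = interval: OnBackupJobTimerFired() runs. If the first job has already
//                 established a connection, is still resolving DNS, or
//                 socket limits are hit, no backup starts (the timer may be
//                 re-armed). Otherwise a second job is created for the
//                 highest-priority unbound request, and the two jobs race to
//                 connect.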

void TransportClientSocketPool::Group::SanityCheck() const {
#if DCHECK_IS_ON()
  DCHECK_LE(never_assigned_job_count(), jobs_.size());
  DCHECK_LE(unassigned_job_count(), jobs_.size());

  // Check that |unassigned_jobs_| is empty iff there are at least as many
  // requests as jobs.
  DCHECK_EQ(unassigned_jobs_.empty(), jobs_.size() <= unbound_requests_.size());

  size_t num_assigned_jobs = jobs_.size() - unassigned_jobs_.size();

  RequestQueue::Pointer pointer = unbound_requests_.FirstMax();
  for (size_t i = 0; i < unbound_requests_.size();
       ++i, pointer = unbound_requests_.GetNextTowardsLastMin(pointer)) {
    DCHECK(!pointer.is_null());
    DCHECK(pointer.value());
    // Check that the first |num_assigned_jobs| requests have valid job
    // assignments.
    if (i < num_assigned_jobs) {
      // The request has a job.
      ConnectJob* job = pointer.value()->job();
      DCHECK(job);
      // The request's job is not in |unassigned_jobs_|.
      DCHECK(!base::Contains(unassigned_jobs_, job));
      // The request's job is in |jobs_|.
      DCHECK(base::Contains(jobs_, job, &std::unique_ptr<ConnectJob>::get));
      // The same job is not assigned to any other request with a job.
      RequestQueue::Pointer pointer2 =
          unbound_requests_.GetNextTowardsLastMin(pointer);
      for (size_t j = i + 1; j < num_assigned_jobs;
           ++j, pointer2 = unbound_requests_.GetNextTowardsLastMin(pointer2)) {
        DCHECK(!pointer2.is_null());
        ConnectJob* job2 = pointer2.value()->job();
        DCHECK(job2);
        DCHECK_NE(job, job2);
      }
      DCHECK_EQ(pointer.value()->priority(), job->priority());
    } else {
      // Check that any subsequent requests do not have a job.
      DCHECK(!pointer.value()->job());
    }
  }

  for (auto it = unassigned_jobs_.begin(); it != unassigned_jobs_.end(); ++it) {
    // Check that all unassigned jobs are in |jobs_|.
    ConnectJob* job = *it;
    DCHECK(base::Contains(jobs_, job, &std::unique_ptr<ConnectJob>::get));
    // Check that there are no duplicated entries in |unassigned_jobs_|.
    for (auto it2 = std::next(it); it2 != unassigned_jobs_.end(); ++it2) {
      DCHECK_NE(job, *it2);
    }

    // Check that no |unassigned_jobs_| are in |bound_requests_|.
    DCHECK(!base::Contains(bound_requests_, job,
                           [](const BoundRequest& bound_request) {
                             return bound_request.connect_job.get();
                           }));
  }
#endif  // DCHECK_IS_ON()
}
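
// A concrete picture of the invariant SanityCheck() enforces (hypothetical
// state): with unbound requests R0 > R1 > R2 and jobs J0..J4,
//   assignments:       R0 -> J0, R1 -> J1, R2 -> J2
//   unassigned_jobs_:  [J3, J4]
// With fewer jobs than requests (say only J0 and J1), |unassigned_jobs_|
// must be empty and exactly the first two requests hold jobs.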

void TransportClientSocketPool::Group::RemoveAllUnboundJobs() {
  SanityCheck();

  // Remove jobs from any requests that have them.
  if (!unbound_requests_.empty()) {
    for (RequestQueue::Pointer pointer = unbound_requests_.FirstMax();
         !pointer.is_null() && pointer.value()->job();
         pointer = unbound_requests_.GetNextTowardsLastMin(pointer)) {
      pointer.value()->ReleaseJob();
    }
  }
  unassigned_jobs_.clear();
  never_assigned_job_count_ = 0;

  // Diagnostics check for crbug.com/1231248. `Group`s are deleted only on
  // removal from `TransportClientSocketPool::group_map_`, so if this check
  // fails, `this` has been deleted, likely through some reentrancy issue.
  CHECK(client_socket_pool_->HasGroup(group_id_));

  // Delete active jobs.
  jobs_.clear();
  // Stop backup job timer.
  backup_job_timer_.Stop();

  SanityCheck();
}

size_t TransportClientSocketPool::Group::ConnectJobCount() const {
  return bound_requests_.size() + jobs_.size();
}

ConnectJob* TransportClientSocketPool::Group::GetConnectJobForHandle(
    const ClientSocketHandle* handle) const {
  // Search through bound requests for |handle|.
  for (const auto& bound_pair : bound_requests_) {
    if (handle == bound_pair.request->handle())
      return bound_pair.connect_job.get();
  }

  // Search through the unbound requests that have corresponding jobs for a
  // request with |handle|.
  for (RequestQueue::Pointer pointer = unbound_requests_.FirstMax();
       !pointer.is_null() && pointer.value()->job();
       pointer = unbound_requests_.GetNextTowardsLastMin(pointer)) {
    if (pointer.value()->handle() == handle)
      return pointer.value()->job();
  }

  return nullptr;
}

void TransportClientSocketPool::Group::InsertUnboundRequest(
    std::unique_ptr<Request> request) {
  SanityCheck();

  // The request should not have a job yet, since it is not in
  // |unbound_requests_|.
  DCHECK(!request->job());
  // This value must be cached before we release |request|.
  RequestPriority priority = request->priority();

  RequestQueue::Pointer new_position;
  if (request->respect_limits() == RespectLimits::DISABLED) {
    // Put requests with RespectLimits::DISABLED (which should have
    // priority == MAXIMUM_PRIORITY) ahead of other requests with
    // MAXIMUM_PRIORITY.
    DCHECK_EQ(priority, MAXIMUM_PRIORITY);
    new_position =
        unbound_requests_.InsertAtFront(std::move(request), priority);
  } else {
    new_position = unbound_requests_.Insert(std::move(request), priority);
  }
  DCHECK(!unbound_requests_.empty());

  TryToAssignJobToRequest(new_position);

  SanityCheck();
}
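
// Queue-position sketch for InsertUnboundRequest() (hypothetical entries):
// inserting a RespectLimits::DISABLED request D into
//   [A(MAXIMUM_PRIORITY), B(MEDIUM)]
// yields [D, A, B], because InsertAtFront() places D ahead of the existing
// MAXIMUM_PRIORITY entry, whereas a normal insert at MAXIMUM_PRIORITY would
// land after A.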

const TransportClientSocketPool::Request*
TransportClientSocketPool::Group::GetNextUnboundRequest() const {
  return unbound_requests_.empty() ? nullptr
                                   : unbound_requests_.FirstMax().value().get();
}

std::unique_ptr<TransportClientSocketPool::Request>
TransportClientSocketPool::Group::PopNextUnboundRequest() {
  if (unbound_requests_.empty())
    return nullptr;
  return RemoveUnboundRequest(unbound_requests_.FirstMax());
}

std::unique_ptr<TransportClientSocketPool::Request>
TransportClientSocketPool::Group::FindAndRemoveUnboundRequest(
    ClientSocketHandle* handle) {
  for (RequestQueue::Pointer pointer = unbound_requests_.FirstMax();
       !pointer.is_null();
       pointer = unbound_requests_.GetNextTowardsLastMin(pointer)) {
    if (pointer.value()->handle() == handle) {
      DCHECK_EQ(static_cast<RequestPriority>(pointer.priority()),
                pointer.value()->priority());
      std::unique_ptr<Request> request = RemoveUnboundRequest(pointer);
      return request;
    }
  }
  return nullptr;
}

void TransportClientSocketPool::Group::SetPendingErrorForAllBoundRequests(
    int pending_error) {
  for (auto& bound_request : bound_requests_) {
    // Earlier errors take precedence.
    if (bound_request.pending_error == OK)
      bound_request.pending_error = pending_error;
  }
}

const TransportClientSocketPool::Request*
TransportClientSocketPool::Group::BindRequestToConnectJob(
    ConnectJob* connect_job) {
  // Check if |connect_job| is already bound to a Request.
  for (const auto& bound_pair : bound_requests_) {
    if (bound_pair.connect_job.get() == connect_job)
      return bound_pair.request.get();
  }

  // If not, try to bind it to a Request.
  const Request* request = GetNextUnboundRequest();
  // If there are no pending requests, or the highest-priority request has no
  // callback to handle auth challenges, return nullptr.
  if (!request || request->proxy_auth_callback().is_null())
    return nullptr;

  // Otherwise, bind the ConnectJob to the Request.
  std::unique_ptr<Request> owned_request = PopNextUnboundRequest();
  DCHECK_EQ(owned_request.get(), request);
  std::unique_ptr<ConnectJob> owned_connect_job = RemoveUnboundJob(connect_job);
  LogBoundConnectJobToRequest(owned_connect_job->net_log().source(), *request);
  bound_requests_.emplace_back(BoundRequest(
      std::move(owned_connect_job), std::move(owned_request), generation()));
  return request;
}
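
// Editorial note: once bound, the request/job pair lives in
// |bound_requests_| rather than the unbound structures, so a repeated auth
// challenge on the same ConnectJob finds the same Request via the
// early-return loop at the top of BindRequestToConnectJob().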

absl::optional<TransportClientSocketPool::Group::BoundRequest>
TransportClientSocketPool::Group::FindAndRemoveBoundRequestForConnectJob(
    ConnectJob* connect_job) {
  for (auto bound_pair = bound_requests_.begin();
       bound_pair != bound_requests_.end(); ++bound_pair) {
    if (bound_pair->connect_job.get() != connect_job)
      continue;
    BoundRequest ret = std::move(*bound_pair);
    bound_requests_.erase(bound_pair);
    return std::move(ret);
  }
  return absl::nullopt;
}

std::unique_ptr<TransportClientSocketPool::Request>
TransportClientSocketPool::Group::FindAndRemoveBoundRequest(
    ClientSocketHandle* client_socket_handle) {
  for (auto bound_pair = bound_requests_.begin();
       bound_pair != bound_requests_.end(); ++bound_pair) {
    if (bound_pair->request->handle() != client_socket_handle)
      continue;
    std::unique_ptr<Request> request = std::move(bound_pair->request);
    bound_requests_.erase(bound_pair);
    return request;
  }
  return nullptr;
}

void TransportClientSocketPool::Group::SetPriority(ClientSocketHandle* handle,
                                                   RequestPriority priority) {
  for (RequestQueue::Pointer pointer = unbound_requests_.FirstMax();
       !pointer.is_null();
       pointer = unbound_requests_.GetNextTowardsLastMin(pointer)) {
    if (pointer.value()->handle() == handle) {
      if (pointer.value()->priority() == priority)
        return;

      std::unique_ptr<Request> request = RemoveUnboundRequest(pointer);

      // Requests that ignore limits must be created at and remain at the
      // highest priority, and should not be reprioritized.
      DCHECK_EQ(request->respect_limits(), RespectLimits::ENABLED);

      request->set_priority(priority);
      InsertUnboundRequest(std::move(request));
      return;
    }
  }

  // This function must be called with a valid ClientSocketHandle.
  NOTREACHED();
}

bool TransportClientSocketPool::Group::RequestWithHandleHasJobForTesting(
    const ClientSocketHandle* handle) const {
  SanityCheck();
  if (GetConnectJobForHandle(handle))
    return true;

  // There's no corresponding ConnectJob. Verify that the handle is at least
  // owned by a request.
  RequestQueue::Pointer pointer = unbound_requests_.FirstMax();
  for (size_t i = 0; i < unbound_requests_.size(); ++i) {
    if (pointer.value()->handle() == handle)
      return false;
    pointer = unbound_requests_.GetNextTowardsLastMin(pointer);
  }
  NOTREACHED();
  return false;
}

TransportClientSocketPool::Group::BoundRequest::BoundRequest()
    : pending_error(OK) {}

TransportClientSocketPool::Group::BoundRequest::BoundRequest(
    std::unique_ptr<ConnectJob> connect_job,
    std::unique_ptr<Request> request,
    int64_t generation)
    : connect_job(std::move(connect_job)),
      request(std::move(request)),
      generation(generation),
      pending_error(OK) {}

TransportClientSocketPool::Group::BoundRequest::BoundRequest(
    BoundRequest&& other) = default;

TransportClientSocketPool::Group::BoundRequest&
TransportClientSocketPool::Group::BoundRequest::operator=(
    BoundRequest&& other) = default;

TransportClientSocketPool::Group::BoundRequest::~BoundRequest() = default;

std::unique_ptr<TransportClientSocketPool::Request>
TransportClientSocketPool::Group::RemoveUnboundRequest(
    const RequestQueue::Pointer& pointer) {
  SanityCheck();

  std::unique_ptr<Request> request = unbound_requests_.Erase(pointer);
  if (request->job()) {
    TryToAssignUnassignedJob(request->ReleaseJob());
  }
  // If there are no more unbound requests, kill the backup timer.
  if (unbound_requests_.empty())
    backup_job_timer_.Stop();

  SanityCheck();
  return request;
}

TransportClientSocketPool::RequestQueue::Pointer
TransportClientSocketPool::Group::FindUnboundRequestWithJob(
    const ConnectJob* job) const {
  SanityCheck();

  for (RequestQueue::Pointer pointer = unbound_requests_.FirstMax();
       !pointer.is_null() && pointer.value()->job();
       pointer = unbound_requests_.GetNextTowardsLastMin(pointer)) {
    if (pointer.value()->job() == job)
      return pointer;
  }
  // If a request with the job was not found, it must be in |unassigned_jobs_|.
  DCHECK(base::Contains(unassigned_jobs_, job));
  return RequestQueue::Pointer();
}

TransportClientSocketPool::RequestQueue::Pointer
TransportClientSocketPool::Group::GetFirstRequestWithoutJob() const {
  RequestQueue::Pointer pointer = unbound_requests_.FirstMax();
  size_t i = 0;
  for (; !pointer.is_null() && pointer.value()->job();
       pointer = unbound_requests_.GetNextTowardsLastMin(pointer)) {
    ++i;
  }
  DCHECK_EQ(i, jobs_.size() - unassigned_jobs_.size());
  DCHECK(pointer.is_null() || !pointer.value()->job());
  return pointer;
}

void TransportClientSocketPool::Group::TryToAssignUnassignedJob(
    ConnectJob* job) {
  unassigned_jobs_.push_back(job);
  RequestQueue::Pointer first_request_without_job = GetFirstRequestWithoutJob();
  if (!first_request_without_job.is_null()) {
    first_request_without_job.value()->AssignJob(unassigned_jobs_.back());
    unassigned_jobs_.pop_back();
  }
}

void TransportClientSocketPool::Group::TryToAssignJobToRequest(
    TransportClientSocketPool::RequestQueue::Pointer request_pointer) {
  DCHECK(!request_pointer.value()->job());
  if (!unassigned_jobs_.empty()) {
    request_pointer.value()->AssignJob(unassigned_jobs_.front());
    unassigned_jobs_.pop_front();
    return;
  }

  // If the next request in the queue does not have a job, then there are no
  // requests with a job after |request_pointer| from which we can steal.
  RequestQueue::Pointer next_request =
      unbound_requests_.GetNextTowardsLastMin(request_pointer);
  if (next_request.is_null() || !next_request.value()->job())
    return;

  // Walk down the queue to find the last request with a job.
  RequestQueue::Pointer cur = next_request;
  RequestQueue::Pointer next = unbound_requests_.GetNextTowardsLastMin(cur);
  while (!next.is_null() && next.value()->job()) {
    cur = next;
    next = unbound_requests_.GetNextTowardsLastMin(next);
  }
  // Steal the job from the last request with a job.
  TransferJobBetweenRequests(cur.value().get(), request_pointer.value().get());
}
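
// Hypothetical walk of the stealing path in TryToAssignJobToRequest(), where
// '*' marks a request holding a job and |unassigned_jobs_| is empty:
//   queue: [R0, R1*, R2*, R3], request_pointer = R0.
// |next_request| is R1 (has a job), the walk advances |cur| to R2 (the last
// request with a job), and R2's job is transferred to R0, giving
//   [R0*, R1*, R2, R3].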

void TransportClientSocketPool::Group::TransferJobBetweenRequests(
    TransportClientSocketPool::Request* source,
    TransportClientSocketPool::Request* dest) {
  DCHECK(!dest->job());
  DCHECK(source->job());
  dest->AssignJob(source->ReleaseJob());
}

}  // namespace net