// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/browser/loader/resource_scheduler.h"

#include "base/stl_util.h"
#include "content/browser/loader/resource_message_delegate.h"
#include "content/common/resource_messages.h"
#include "content/public/browser/resource_controller.h"
#include "content/public/browser/resource_request_info.h"
#include "content/public/browser/resource_throttle.h"
#include "ipc/ipc_message_macros.h"
#include "net/base/host_port_pair.h"
#include "net/base/load_flags.h"
#include "net/base/request_priority.h"
#include "net/http/http_server_properties.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"

namespace content {

static const size_t kMaxNumDelayableRequestsPerClient = 10;
static const size_t kMaxNumDelayableRequestsPerHost = 6;

// A thin wrapper around net::PriorityQueue that deals with
// ScheduledResourceRequests instead of PriorityQueue::Pointers.
class ResourceScheduler::RequestQueue {
 private:
  typedef net::PriorityQueue<ScheduledResourceRequest*> NetQueue;

 public:
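  // A forward iterator over the queued requests, ordered from the highest
  // priority request down to the lowest. Wraps NetQueue::Pointer so callers
  // deal in ScheduledResourceRequest* values rather than raw queue pointers.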
  class Iterator {
   public:
    Iterator(NetQueue* queue) : queue_(queue) {
      DCHECK(queue != NULL);
      current_pointer_ = queue_->FirstMax();
    }

    Iterator& operator++() {
      current_pointer_ = queue_->GetNextTowardsLastMin(current_pointer_);
      return *this;
    }

    Iterator operator++(int) {
      Iterator result(*this);
      ++(*this);
      return result;
    }

    ScheduledResourceRequest* value() {
      return current_pointer_.value();
    }

    bool is_null() {
      return current_pointer_.is_null();
    }

   private:
    NetQueue* queue_;
    NetQueue::Pointer current_pointer_;
  };

  RequestQueue() : queue_(net::NUM_PRIORITIES) {}
  ~RequestQueue() {}

  // Adds |request| to the queue with given |priority|.
  void Insert(ScheduledResourceRequest* request,
              net::RequestPriority priority) {
    DCHECK(!ContainsKey(pointers_, request));
    NetQueue::Pointer pointer = queue_.Insert(request, priority);
    pointers_[request] = pointer;
  }

  // Removes |request| from the queue.
  void Erase(ScheduledResourceRequest* request) {
    PointerMap::iterator it = pointers_.find(request);
    DCHECK(it != pointers_.end());
    if (it == pointers_.end())
      return;
    queue_.Erase(it->second);
    pointers_.erase(it);
  }

  // Returns the highest priority request that's queued, or NULL if none are.
  ScheduledResourceRequest* FirstMax() {
    return queue_.FirstMax().value();
  }

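  // Returns an iterator positioned at the highest priority queued request.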
  Iterator GetNextHighestIterator() {
    return Iterator(&queue_);
  }

  // Returns true if |request| is queued.
  bool IsQueued(ScheduledResourceRequest* request) const {
    return ContainsKey(pointers_, request);
  }

  // Returns true if no requests are queued.
  bool IsEmpty() const { return queue_.size() == 0; }

 private:
  typedef std::map<ScheduledResourceRequest*, NetQueue::Pointer> PointerMap;

  NetQueue queue_;
  PointerMap pointers_;
};

// This is the handle we return to the ResourceDispatcherHostImpl so it can
// interact with the request.
class ResourceScheduler::ScheduledResourceRequest
    : public ResourceMessageDelegate,
      public ResourceThrottle {
 public:
  ScheduledResourceRequest(const ClientId& client_id,
                           net::URLRequest* request,
                           ResourceScheduler* scheduler)
      : ResourceMessageDelegate(request),
        client_id_(client_id),
        request_(request),
        ready_(false),
        deferred_(false),
        scheduler_(scheduler) {
    TRACE_EVENT_ASYNC_BEGIN1("net", "URLRequest", request_,
                             "url", request->url().spec());
  }

  virtual ~ScheduledResourceRequest() {
    scheduler_->RemoveRequest(this);
  }

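  // Called by the scheduler when the request is allowed to proceed. If the
  // throttle already deferred the request in WillStartRequest(), this
  // resumes it.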
  void Start() {
    TRACE_EVENT_ASYNC_STEP_PAST0("net", "URLRequest", request_, "Queued");
    ready_ = true;
    if (deferred_ && request_->status().is_success()) {
      deferred_ = false;
      controller()->Resume();
    }
  }

  const ClientId& client_id() const { return client_id_; }
  net::URLRequest* url_request() { return request_; }
  const net::URLRequest* url_request() const { return request_; }

 private:
  // ResourceMessageDelegate interface:
  virtual bool OnMessageReceived(const IPC::Message& message,
                                 bool* message_was_ok) OVERRIDE {
    bool handled = true;
    IPC_BEGIN_MESSAGE_MAP_EX(ScheduledResourceRequest, message, *message_was_ok)
      IPC_MESSAGE_HANDLER(ResourceHostMsg_DidChangePriority, DidChangePriority)
      IPC_MESSAGE_UNHANDLED(handled = false)
    IPC_END_MESSAGE_MAP_EX()
    return handled;
  }

  // ResourceThrottle interface:
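  // Defers the request if the scheduler has not yet marked it ready; Start()
  // resumes it later.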
  virtual void WillStartRequest(bool* defer) OVERRIDE {
    deferred_ = *defer = !ready_;
  }

  virtual const char* GetNameForLogging() const OVERRIDE {
    return "ResourceScheduler";
  }

  void DidChangePriority(int request_id, net::RequestPriority new_priority) {
    scheduler_->ReprioritizeRequest(this, new_priority);
  }

  ClientId client_id_;
  net::URLRequest* request_;
  bool ready_;
  bool deferred_;
  ResourceScheduler* scheduler_;

  DISALLOW_COPY_AND_ASSIGN(ScheduledResourceRequest);
};

// Each client represents a tab.
struct ResourceScheduler::Client {
  Client() : has_body(false) {}
  ~Client() {}

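  // True once the renderer has started inserting the document <body>; until
  // then, delayable requests are throttled more aggressively (see
  // ShouldStartRequest below).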
  bool has_body;
  RequestQueue pending_requests;
  RequestSet in_flight_requests;
};

ResourceScheduler::ResourceScheduler() {
}

ResourceScheduler::~ResourceScheduler() {
  DCHECK(unowned_requests_.empty());
  DCHECK(client_map_.empty());
}

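// Creates the ResourceThrottle that ResourceDispatcherHostImpl attaches to
// |url_request|. The request either starts immediately or is queued on the
// owning Client until ShouldStartRequest says it may proceed.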
scoped_ptr<ResourceThrottle> ResourceScheduler::ScheduleRequest(
    int child_id,
    int route_id,
    net::URLRequest* url_request) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);
  scoped_ptr<ScheduledResourceRequest> request(
      new ScheduledResourceRequest(client_id, url_request, this));

  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end()) {
    // There are several ways this could happen:
    // 1. <a ping> requests don't have a route_id.
    // 2. Most unittests don't send the IPCs needed to register Clients.
    // 3. The tab is closed while a RequestResource IPC is in flight.
    unowned_requests_.insert(request.get());
    request->Start();
    return request.PassAs<ResourceThrottle>();
  }

  Client* client = it->second;
  if (ShouldStartRequest(request.get(), client) == START_REQUEST) {
    StartRequest(request.get(), client);
  } else {
    client->pending_requests.Insert(request.get(), url_request->priority());
  }
  return request.PassAs<ResourceThrottle>();
}

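// Called from the ScheduledResourceRequest destructor. Drops the request from
// whichever container currently owns it and, if it was in flight, tries to
// start any queued requests that the freed slot now permits.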
void ResourceScheduler::RemoveRequest(ScheduledResourceRequest* request) {
  DCHECK(CalledOnValidThread());
  if (ContainsKey(unowned_requests_, request)) {
    unowned_requests_.erase(request);
    return;
  }

  ClientMap::iterator client_it = client_map_.find(request->client_id());
  if (client_it == client_map_.end()) {
    return;
  }

  Client* client = client_it->second;

  if (client->pending_requests.IsQueued(request)) {
    client->pending_requests.Erase(request);
    DCHECK(!ContainsKey(client->in_flight_requests, request));
  } else {
    size_t erased = client->in_flight_requests.erase(request);
    DCHECK(erased);

    // Removing this request may have freed up another to load.
    LoadAnyStartablePendingRequests(client);
  }
}

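// Registers a new Client (tab) identified by |child_id| and |route_id|.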
void ResourceScheduler::OnClientCreated(int child_id, int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);
  DCHECK(!ContainsKey(client_map_, client_id));

  client_map_[client_id] = new Client;
}

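// Tears down the Client. Requests still in flight are moved to
// |unowned_requests_| so they can be removed cleanly when they eventually
// complete or are canceled.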
void ResourceScheduler::OnClientDeleted(int child_id, int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);
  DCHECK(ContainsKey(client_map_, client_id));
  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end())
    return;

  Client* client = it->second;

  // FYI, ResourceDispatcherHost cancels all of the requests after this
  // function is called. It should end up canceling all of the requests except
  // for a cross-renderer navigation.
  for (RequestSet::iterator request_it = client->in_flight_requests.begin();
       request_it != client->in_flight_requests.end(); ++request_it) {
    unowned_requests_.insert(*request_it);
  }
  client->in_flight_requests.clear();

  delete client;
  client_map_.erase(it);
}

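// Resets the <body> signal when the client starts a new navigation, so
// delayable requests are throttled again until OnWillInsertBody fires for the
// new document.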
void ResourceScheduler::OnNavigate(int child_id, int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);

  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end()) {
    // The client was likely deleted shortly before we received this IPC.
    return;
  }

  Client* client = it->second;
  client->has_body = false;
}

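// Called when the renderer is about to insert the document <body>. Marks the
// client accordingly and kicks off any pending requests that are now allowed
// to load.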
void ResourceScheduler::OnWillInsertBody(int child_id, int route_id) {
  DCHECK(CalledOnValidThread());
  ClientId client_id = MakeClientId(child_id, route_id);

  ClientMap::iterator it = client_map_.find(client_id);
  if (it == client_map_.end()) {
    // The client was likely deleted shortly before we received this IPC.
    return;
  }

  Client* client = it->second;
  if (!client->has_body) {
    client->has_body = true;
    LoadAnyStartablePendingRequests(client);
  }
}

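// Moves |request| into the client's in-flight set and tells its throttle to
// proceed (resuming it if it was deferred).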
void ResourceScheduler::StartRequest(ScheduledResourceRequest* request,
                                     Client* client) {
  client->in_flight_requests.insert(request);
  request->Start();
}

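// Handles a DidChangePriority IPC from the renderer: updates the underlying
// net::URLRequest and, if the request is still queued, re-inserts it at its
// new priority.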
void ResourceScheduler::ReprioritizeRequest(ScheduledResourceRequest* request,
                                            net::RequestPriority new_priority) {
  if (request->url_request()->load_flags() & net::LOAD_IGNORE_LIMITS) {
    // We should not be re-prioritizing requests with the
    // IGNORE_LIMITS flag.
    NOTREACHED();
    return;
  }
  net::RequestPriority old_priority = request->url_request()->priority();
  DCHECK_NE(new_priority, old_priority);
  request->url_request()->SetPriority(new_priority);
  ClientMap::iterator client_it = client_map_.find(request->client_id());
  if (client_it == client_map_.end()) {
    // The client was likely deleted shortly before we received this IPC.
    return;
  }

  Client* client = client_it->second;
  if (!client->pending_requests.IsQueued(request)) {
    DCHECK(ContainsKey(client->in_flight_requests, request));
    // Request has already started.
    return;
  }

  client->pending_requests.Erase(request);
  client->pending_requests.Insert(request,
                                  request->url_request()->priority());

  if (new_priority > old_priority) {
    // Check if this request is now able to load at its new priority.
    LoadAnyStartablePendingRequests(client);
  }
}

void ResourceScheduler::LoadAnyStartablePendingRequests(Client* client) {
  // We iterate through all the pending requests, starting with the highest
  // priority one. For each entry, one of three things can happen:
  // 1) We start the request, remove it from the list, and keep checking.
  // 2) We do NOT start the request, but ShouldStartRequest() signals us that
  //    there may be room for other requests, so we keep checking and leave
  //    the previous request still in the list.
  // 3) We do not start the request, same as above, but ShouldStartRequest()
  //    tells us there's no point in checking any further requests.

  RequestQueue::Iterator request_iter =
      client->pending_requests.GetNextHighestIterator();

  while (!request_iter.is_null()) {
    ScheduledResourceRequest* request = request_iter.value();
    ShouldStartReqResult query_result = ShouldStartRequest(request, client);

    if (query_result == START_REQUEST) {
      client->pending_requests.Erase(request);
      StartRequest(request, client);

      // StartRequest can modify the pending list, so we (re)start evaluation
      // from the currently highest priority request. Avoid copying a singular
      // iterator, which would trigger undefined behavior.
      if (client->pending_requests.GetNextHighestIterator().is_null())
        break;
      request_iter = client->pending_requests.GetNextHighestIterator();
    } else if (query_result == DO_NOT_START_REQUEST_AND_KEEP_SEARCHING) {
      ++request_iter;
      continue;
    } else {
      DCHECK(query_result == DO_NOT_START_REQUEST_AND_STOP_SEARCHING);
      break;
    }
  }
}

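// Counts the in-flight requests that count against the delayable limits:
// |total_delayable| is the number of in-flight requests below net::LOW
// priority to non-SPDY hosts, and |total_for_active_host| is the number of
// in-flight requests (of any priority) to |active_request_host|.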
void ResourceScheduler::GetNumDelayableRequestsInFlight(
    Client* client,
    const net::HostPortPair& active_request_host,
    size_t* total_delayable,
    size_t* total_for_active_host) const {
  DCHECK(client != NULL && total_delayable != NULL &&
         total_for_active_host != NULL);

  size_t total_delayable_count = 0;
  size_t same_host_count = 0;
  for (RequestSet::iterator it = client->in_flight_requests.begin();
       it != client->in_flight_requests.end(); ++it) {
    net::HostPortPair host_port_pair =
        net::HostPortPair::FromURL((*it)->url_request()->url());

    if (active_request_host.Equals(host_port_pair)) {
      same_host_count++;
    }

    if ((*it)->url_request()->priority() < net::LOW) {
      const net::HttpServerProperties& http_server_properties =
          *(*it)->url_request()->context()->http_server_properties();

      if (!http_server_properties.SupportsSpdy(host_port_pair)) {
        ++total_delayable_count;
      }
    }
  }
  *total_delayable = total_delayable_count;
  *total_for_active_host = same_host_count;
}

// ShouldStartRequest is the main scheduling algorithm.
//
// Requests fall into two categories:
//
// 1. Immediately issued requests, which are:
//
//   * Higher priority requests (>= net::LOW).
//   * Synchronous requests.
//   * Requests to SPDY-capable origin servers.
//   * Non-HTTP[S] requests.
//
// 2. The remainder are delayable requests, which follow these rules:
//
//   * If no high priority requests are in flight, start loading low priority
//     requests.
//   * Once the renderer has a <body>, start loading delayable requests.
//   * Never exceed 10 delayable requests in flight per client.
//   * Never exceed 6 delayable requests for a given host.
//   * Prior to <body>, allow one delayable request to load at a time.
ResourceScheduler::ShouldStartReqResult ResourceScheduler::ShouldStartRequest(
    ScheduledResourceRequest* request,
    Client* client) const {
  const net::URLRequest& url_request = *request->url_request();

  // TODO(simonjam): This may end up causing disk contention. We should
  // experiment with throttling if that happens.
  if (!url_request.url().SchemeIsHTTPOrHTTPS()) {
    return START_REQUEST;
  }

  const net::HttpServerProperties& http_server_properties =
      *url_request.context()->http_server_properties();

  if (url_request.priority() >= net::LOW ||
      !ResourceRequestInfo::ForRequest(&url_request)->IsAsync()) {
    return START_REQUEST;
  }

  net::HostPortPair host_port_pair =
      net::HostPortPair::FromURL(url_request.url());

  // TODO(willchan): We should really improve this algorithm as described in
  // crbug.com/164101. Also, theoretically we should not count a SPDY request
  // against the delayable requests limit.
  if (http_server_properties.SupportsSpdy(host_port_pair)) {
    return START_REQUEST;
  }

  size_t num_delayable_requests_in_flight = 0;
  size_t num_requests_in_flight_for_host = 0;
  GetNumDelayableRequestsInFlight(client, host_port_pair,
                                  &num_delayable_requests_in_flight,
                                  &num_requests_in_flight_for_host);

  if (num_delayable_requests_in_flight >= kMaxNumDelayableRequestsPerClient) {
    return DO_NOT_START_REQUEST_AND_STOP_SEARCHING;
  }

  if (num_requests_in_flight_for_host >= kMaxNumDelayableRequestsPerHost) {
    // There may be other requests for other hosts we'd allow, so keep
    // checking.
    return DO_NOT_START_REQUEST_AND_KEEP_SEARCHING;
  }

  bool have_immediate_requests_in_flight =
      client->in_flight_requests.size() > num_delayable_requests_in_flight;
  if (have_immediate_requests_in_flight && !client->has_body &&
      num_delayable_requests_in_flight != 0) {
    return DO_NOT_START_REQUEST_AND_STOP_SEARCHING;
  }

  return START_REQUEST;
}

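// Packs |child_id| into the upper 32 bits of the ClientId and |route_id| into
// the lower 32 bits. For example, assuming ClientId is a 64-bit integer,
// child_id 5 and route_id 2 yield 0x0000000500000002.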
ResourceScheduler::ClientId ResourceScheduler::MakeClientId(
    int child_id, int route_id) {
  return (static_cast<ResourceScheduler::ClientId>(child_id) << 32) | route_id;
}

}  // namespace content