• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "net/url_request/url_request_job.h"
6 
7 #include "base/message_loop.h"
8 #include "base/string_util.h"
9 #include "net/base/auth.h"
10 #include "net/base/io_buffer.h"
11 #include "net/base/load_flags.h"
12 #include "net/base/net_errors.h"
13 #include "net/http/http_response_headers.h"
14 #include "net/url_request/url_request.h"
15 #include "net/url_request/url_request_job_metrics.h"
16 #include "net/url_request/url_request_job_tracker.h"
17 
18 using base::Time;
19 using base::TimeTicks;
20 
// Buffer size allocated when de-compressing data (32 KB).  Used by filter
// implementations when sizing their stream buffers.
// static
const int URLRequestJob::kFilterBufSize = 32 * 1024;
24 
// Constructs a job bound to |request|.  The request may later be detached
// (see DetachRequest), after which request_ is NULL and request_-dependent
// accessors fall back to safe defaults.
URLRequestJob::URLRequestJob(URLRequest* request)
    : request_(request),
      done_(false),
      filter_needs_more_output_space_(false),
      read_buffer_(NULL),
      read_buffer_len_(0),
      has_handled_response_(false),
      expected_content_size_(-1),  // -1 == size unknown.
      deferred_redirect_status_code_(-1),  // -1 == no redirect deferred.
      packet_timing_enabled_(false),
      filter_input_byte_count_(0),
      bytes_observed_in_packets_(0),
      max_packets_timed_(0),
      observed_packet_count_(0) {
  load_flags_ = request_->load_flags();
  is_profiling_ = request->enable_profiling();
  if (is_profiling()) {
    // Profiling is enabled: start collecting per-job metrics immediately.
    metrics_.reset(new URLRequestJobMetrics());
    metrics_->start_time_ = TimeTicks::Now();
  }
  // Register with the global tracker so observers are told about this job.
  g_url_request_job_tracker.AddNewJob(this);
}
47 
URLRequestJob::~URLRequestJob() {
  // Unregister from the global tracker; mirrors AddNewJob in the constructor.
  g_url_request_job_tracker.RemoveJob(this);
}
51 
// Cancels the job.  Safe to call after DetachRequest (it becomes a no-op).
void URLRequestJob::Kill() {
  // Make sure the request is notified that we are done.  We assume that the
  // request took care of setting its error status before calling Kill.
  if (request_)
    NotifyCanceled();
}
58 
// Severs the link back to the owning URLRequest.  After this call the job
// must not touch the request; helpers check request_ for NULL.
void URLRequestJob::DetachRequest() {
  request_ = NULL;
}
62 
// Returns true if the originating request was flagged as a download via
// its load flags.
bool URLRequestJob::IsDownload() const {
  return (load_flags_ & net::LOAD_IS_DOWNLOAD) != 0;
}
66 
// Installs a content-decoding filter chain if the response advertises any
// content encodings; otherwise filter_ stays NULL and reads pass through
// unfiltered.
void URLRequestJob::SetupFilter() {
  std::vector<Filter::FilterType> encoding_types;
  if (GetContentEncodings(&encoding_types)) {
    filter_.reset(Filter::Factory(encoding_types, *this));
  }
}
73 
IsRedirectResponse(GURL * location,int * http_status_code)74 bool URLRequestJob::IsRedirectResponse(GURL* location,
75                                        int* http_status_code) {
76   // For non-HTTP jobs, headers will be null.
77   net::HttpResponseHeaders* headers = request_->response_headers();
78   if (!headers)
79     return false;
80 
81   std::string value;
82   if (!headers->IsRedirect(&value))
83     return false;
84 
85   *location = request_->url().Resolve(value);
86   *http_status_code = headers->response_code();
87   return true;
88 }
89 
// Base implementation: must be overridden by any job whose NeedsAuth()
// returns true.
void URLRequestJob::GetAuthChallengeInfo(
    scoped_refptr<net::AuthChallengeInfo>* auth_info) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}
96 
// Base implementation: must be overridden by any job whose NeedsAuth()
// returns true.
void URLRequestJob::SetAuth(const std::wstring& username,
                            const std::wstring& password) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}
103 
// Base implementation: must be overridden by any job whose NeedsAuth()
// returns true.
void URLRequestJob::CancelAuth() {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}
109 
// Base implementation: jobs that request client certificates must override.
void URLRequestJob::ContinueWithCertificate(
    net::X509Certificate* client_cert) {
  // The derived class should implement this!
  NOTREACHED();
}
115 
// Base implementation: reaching this means a subclass produced an error it
// does not know how to recover from.
void URLRequestJob::ContinueDespiteLastError() {
  // Implementations should know how to recover from errors they generate.
  // If this code was reached, we are trying to recover from an error that
  // we don't know how to recover from.
  NOTREACHED();
}
122 
// Resumes a redirect that was deferred in NotifyHeadersComplete.  The saved
// redirect state is consumed (reset) before FollowRedirect runs, because
// FollowRedirect may destroy |this|.
void URLRequestJob::FollowDeferredRedirect() {
  // -1 means no redirect was deferred; see the constructor initializer.
  DCHECK(deferred_redirect_status_code_ != -1);

  // NOTE: deferred_redirect_url_ may be invalid, and attempting to redirect to
  // such an URL will fail inside FollowRedirect.  The DCHECK above asserts
  // that we called OnReceivedRedirect.

  // It is also possible that FollowRedirect will drop the last reference to
  // this job, so we need to reset our members before calling it.

  GURL redirect_url = deferred_redirect_url_;
  int redirect_status_code = deferred_redirect_status_code_;

  deferred_redirect_url_ = GURL();
  deferred_redirect_status_code_ = -1;

  FollowRedirect(redirect_url, redirect_status_code);
}
141 
// Returns the number of raw (pre-filter) bytes read so far; updated in
// RecordBytesRead.
int64 URLRequestJob::GetByteReadCount() const {
  return filter_input_byte_count_;
}
145 
GetURL(GURL * gurl) const146 bool URLRequestJob::GetURL(GURL* gurl) const {
147   if (!request_)
148     return false;
149   *gurl = request_->url();
150   return true;
151 }
152 
GetRequestTime() const153 base::Time URLRequestJob::GetRequestTime() const {
154   if (!request_)
155     return base::Time();
156   return request_->request_time();
157 };
158 
159 // This function calls ReadData to get stream data. If a filter exists, passes
160 // the data to the attached filter. Then returns the output from filter back to
161 // the caller.
Read(net::IOBuffer * buf,int buf_size,int * bytes_read)162 bool URLRequestJob::Read(net::IOBuffer* buf, int buf_size, int *bytes_read) {
163   bool rv = false;
164 
165   DCHECK_LT(buf_size, 1000000);  // sanity check
166   DCHECK(buf);
167   DCHECK(bytes_read);
168 
169   *bytes_read = 0;
170 
171   // Skip Filter if not present
172   if (!filter_.get()) {
173     rv = ReadRawData(buf, buf_size, bytes_read);
174     if (rv && *bytes_read > 0)
175       RecordBytesRead(*bytes_read);
176   } else {
177     // Save the caller's buffers while we do IO
178     // in the filter's buffers.
179     read_buffer_ = buf;
180     read_buffer_len_ = buf_size;
181 
182     if (ReadFilteredData(bytes_read)) {
183       rv = true;   // we have data to return
184     } else {
185       rv = false;  // error, or a new IO is pending
186     }
187   }
188   if (rv && *bytes_read == 0)
189     NotifyDone(URLRequestStatus());
190   return rv;
191 }
192 
// Fetches more raw bytes into the filter's stream buffer when it has run
// dry.  Returns true if a synchronous read succeeded; *bytes_read == 0 then
// indicates EOF.  Returns false on error or pending IO.
bool URLRequestJob::ReadRawDataForFilter(int *bytes_read) {
  bool rv = false;

  DCHECK(bytes_read);
  DCHECK(filter_.get());

  *bytes_read = 0;

  // Get more pre-filtered data if needed.
  // TODO(mbelshe): is it possible that the filter needs *MORE* data
  //    when there is some data already in the buffer?
  if (!filter_->stream_data_len() && !is_done()) {
    net::IOBuffer* stream_buffer = filter_->stream_buffer();
    int stream_buffer_size = filter_->stream_buffer_size();
    rv = ReadRawData(stream_buffer, stream_buffer_size, bytes_read);
    if (rv && *bytes_read > 0)
      RecordBytesRead(*bytes_read);
  }
  return rv;
}
213 
// Redirects the request to |location|, notifying the job tracker first.  If
// the redirect cannot be followed (e.g. invalid target URL), the job is
// completed with the resulting error.  May drop the last reference to this
// job (see FollowDeferredRedirect).
void URLRequestJob::FollowRedirect(const GURL& location, int http_status_code) {
  g_url_request_job_tracker.OnJobRedirect(this, location, http_status_code);

  int rv = request_->Redirect(location, http_status_code);
  if (rv != net::OK)
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
}
221 
// Tells the filter that |bytes_read| raw bytes have been deposited into its
// stream buffer and are ready for decoding.
void URLRequestJob::FilteredDataRead(int bytes_read) {
  DCHECK(filter_.get());  // don't add data if there is no filter
  filter_->FlushStreamBuffer(bytes_read);
}
226 
// Produces decoded bytes into the caller's buffer saved in Read().  Pulls
// raw data through ReadRawDataForFilter when the filter is starved, then
// drains the filter into read_buffer_.  Returns true when output (possibly
// zero bytes == EOF) is available; false on error or pending IO.  Note:
// recurses when the filter needs more input before it can emit anything.
bool URLRequestJob::ReadFilteredData(int *bytes_read) {
  DCHECK(filter_.get());  // don't add data if there is no filter
  DCHECK(read_buffer_ != NULL);  // we need to have a buffer to fill
  DCHECK_GT(read_buffer_len_, 0);  // sanity check
  DCHECK_LT(read_buffer_len_, 1000000);  // sanity check

  bool rv = false;
  *bytes_read = 0;

  if (is_done())
    return true;

  if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
    // We don't have any raw data to work with, so
    // read from the socket.
    int filtered_data_read;
    if (ReadRawDataForFilter(&filtered_data_read)) {
      if (filtered_data_read > 0) {
        filter_->FlushStreamBuffer(filtered_data_read);  // Give data to filter.
      } else {
        return true;  // EOF
      }
    } else {
      return false;  // IO Pending (or error)
    }
  }

  if ((filter_->stream_data_len() || filter_needs_more_output_space_)
      && !is_done()) {
    // Get filtered data.
    int filtered_data_len = read_buffer_len_;
    Filter::FilterStatus status;
    int output_buffer_size = filtered_data_len;
    status = filter_->ReadData(read_buffer_->data(), &filtered_data_len);

    if (filter_needs_more_output_space_ && 0 == filtered_data_len) {
      // filter_needs_more_output_space_ was mistaken... there are no more bytes
      // and we should have at least tried to fill up the filter's input buffer.
      // Correct the state, and try again.
      filter_needs_more_output_space_ = false;
      return ReadFilteredData(bytes_read);
    }

    switch (status) {
      case Filter::FILTER_DONE: {
        filter_needs_more_output_space_ = false;
        *bytes_read = filtered_data_len;
        rv = true;
        break;
      }
      case Filter::FILTER_NEED_MORE_DATA: {
        // A full output buffer hints that the filter still had output
        // pending; remember that so the next call drains it first.
        filter_needs_more_output_space_ =
            (filtered_data_len == output_buffer_size);
        // We have finished filtering all data currently in the buffer.
        // There might be some space left in the output buffer. One can
        // consider reading more data from the stream to feed the filter
        // and filling up the output buffer. This leads to more complicated
        // buffer management and data notification mechanisms.
        // We can revisit this issue if there is a real perf need.
        if (filtered_data_len > 0) {
          *bytes_read = filtered_data_len;
          rv = true;
        } else {
          // Read again since we haven't received enough data yet (e.g., we may
          // not have a complete gzip header yet)
          rv = ReadFilteredData(bytes_read);
        }
        break;
      }
      case Filter::FILTER_OK: {
        filter_needs_more_output_space_ =
            (filtered_data_len == output_buffer_size);
        *bytes_read = filtered_data_len;
        rv = true;
        break;
      }
      case Filter::FILTER_ERROR: {
        filter_needs_more_output_space_ = false;
        NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
                   net::ERR_CONTENT_DECODING_FAILED));
        rv = false;
        break;
      }
      default: {
        NOTREACHED();
        filter_needs_more_output_space_ = false;
        rv = false;
        break;
      }
    }
  } else {
    // we are done, or there is no data left.
    rv = true;
  }

  if (rv) {
    // When we successfully finished a read, we no longer need to
    // save the caller's buffers.  For debugging purposes, we clear
    // them out.
    read_buffer_ = NULL;
    read_buffer_len_ = 0;
  }
  return rv;
}
331 
// Default implementation for jobs that produce no body data: reports zero
// bytes and completes the request successfully.  Subclasses override this
// to supply actual response bytes.
bool URLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size,
                                int *bytes_read) {
  DCHECK(bytes_read);
  *bytes_read = 0;
  NotifyDone(URLRequestStatus());
  return false;
}
339 
RetrieveMetrics()340 URLRequestJobMetrics* URLRequestJob::RetrieveMetrics() {
341   if (is_profiling())
342     return metrics_.release();
343   else
344     return NULL;
345 }
346 
// Called by subclasses once response headers are available.  Handles
// redirects (possibly deferring them) and auth challenges before declaring
// the response started.  Delegate callbacks here may destroy the request or
// drop the last reference to this job, hence the self-preservation ref and
// the repeated request_ NULL checks.
void URLRequestJob::NotifyHeadersComplete() {
  if (!request_ || !request_->delegate())
    return;  // The request was destroyed, so there is no more work to do.

  if (has_handled_response_)
    return;

  DCHECK(!request_->status().is_io_pending());

  // Initialize to the current time, and let the subclass optionally override
  // the time stamps if it has that information.  The default request_time is
  // set by URLRequest before it calls our Start method.
  request_->response_info_.response_time = Time::Now();
  GetResponseInfo(&request_->response_info_);

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this').  After calling to the delgate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed.  self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation = this;

  GURL new_location;
  int http_status_code;
  if (IsRedirectResponse(&new_location, &http_status_code)) {
    const GURL& url = request_->url();

    // Move the reference fragment of the old location to the new one if the
    // new one has none. This duplicates mozilla's behavior.
    if (url.is_valid() && url.has_ref() && !new_location.has_ref()) {
      GURL::Replacements replacements;
      // Reference the |ref| directly out of the original URL to avoid a
      // malloc.
      replacements.SetRef(url.spec().data(),
                          url.parsed_for_possibly_invalid_spec().ref);
      new_location = new_location.ReplaceComponents(replacements);
    }

    bool defer_redirect = false;
    request_->ReceivedRedirect(new_location, &defer_redirect);

    // Ensure that the request wasn't detached or destroyed in ReceivedRedirect
    if (!request_ || !request_->delegate())
      return;

    // If we were not cancelled, then maybe follow the redirect.
    if (request_->status().is_success()) {
      if (defer_redirect) {
        // Save the redirect for FollowDeferredRedirect to resume later.
        deferred_redirect_url_ = new_location;
        deferred_redirect_status_code_ = http_status_code;
      } else {
        FollowRedirect(new_location, http_status_code);
      }
      return;
    }
  } else if (NeedsAuth()) {
    scoped_refptr<net::AuthChallengeInfo> auth_info;
    GetAuthChallengeInfo(&auth_info);
    // Need to check for a NULL auth_info because the server may have failed
    // to send a challenge with the 401 response.
    if (auth_info) {
      request_->delegate()->OnAuthRequired(request_, auth_info);
      // Wait for SetAuth or CancelAuth to be called.
      return;
    }
  }

  has_handled_response_ = true;
  if (request_->status().is_success())
    SetupFilter();

  if (!filter_.get()) {
    // Only advertise an expected size when no filter will change the byte
    // count between wire and caller.
    std::string content_length;
    request_->GetResponseHeaderByName("content-length", &content_length);
    if (!content_length.empty())
      expected_content_size_ = StringToInt64(content_length);
  }

  request_->ResponseStarted();
}
427 
// Reports a failure that occurred before any response was handled.  Must be
// called at most once, and only before NotifyHeadersComplete.
void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
  DCHECK(!has_handled_response_);
  has_handled_response_ = true;
  if (request_) {
    request_->set_status(status);
    request_->ResponseStarted();
  }
}
436 
// Called by subclasses when an asynchronous raw read finishes.  Feeds the
// filter (if any) and forwards the (possibly filtered) byte count to the
// delegate.  Delegate callbacks may destroy the request/job; see the
// self-preservation ref below.
void URLRequestJob::NotifyReadComplete(int bytes_read) {
  if (!request_ || !request_->delegate())
    return;  // The request was destroyed, so there is no more work to do.

  // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome
  // unit_tests have been fixed to not trip this.
  //DCHECK(!request_->status().is_io_pending());

  // The headers should be complete before reads complete
  DCHECK(has_handled_response_);

  if (bytes_read > 0)
    RecordBytesRead(bytes_read);

  // Don't notify if we had an error.
  if (!request_->status().is_success())
    return;

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this').  After calling to the delgate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed.  self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation = this;

  if (filter_.get()) {
    // Tell the filter that it has more data
    FilteredDataRead(bytes_read);

    // Filter the data.
    int filter_bytes_read = 0;
    if (ReadFilteredData(&filter_bytes_read))
      request_->delegate()->OnReadCompleted(request_, filter_bytes_read);
  } else {
    request_->delegate()->OnReadCompleted(request_, bytes_read);
  }
}
474 
// Marks the job as done (exactly once), finalizes profiling metrics, records
// the final status on the request, and schedules CompleteNotifyDone so the
// delegate is never re-entered synchronously.
void URLRequestJob::NotifyDone(const URLRequestStatus &status) {
  DCHECK(!done_) << "Job sending done notification twice";
  if (done_)
    return;
  done_ = true;

  if (is_profiling() && metrics_->total_bytes_read_ > 0) {
    // There are valid IO statistics. Fill in other fields of metrics for
    // profiling consumers to retrieve information.
    // NOTE(review): this branch dereferences request_ without a NULL check;
    // presumably a detached request never reaches here with profiling
    // enabled and bytes read — verify against callers.
    metrics_->original_url_.reset(new GURL(request_->original_url()));
    metrics_->end_time_ = TimeTicks::Now();
    metrics_->success_ = status.is_success();

    if (!(request_->original_url() == request_->url())) {
      metrics_->url_.reset(new GURL(request_->url()));
    }
  } else {
    metrics_.reset();
  }


  // Unless there was an error, we should have at least tried to handle
  // the response before getting here.
  DCHECK(has_handled_response_ || !status.is_success());

  // As with NotifyReadComplete, we need to take care to notice if we were
  // destroyed during a delegate callback.
  if (request_) {
    request_->set_is_pending(false);
    // With async IO, it's quite possible to have a few outstanding
    // requests.  We could receive a request to Cancel, followed shortly
    // by a successful IO.  For tracking the status(), once there is
    // an error, we do not change the status back to success.  To
    // enforce this, only set the status if the job is so far
    // successful.
    if (request_->status().is_success())
      request_->set_status(status);
  }

  g_url_request_job_tracker.OnJobDone(this, status);

  // Complete this notification later.  This prevents us from re-entering the
  // delegate if we're done because of a synchronous call.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &URLRequestJob::CompleteNotifyDone));
}
521 
// Runs asynchronously after NotifyDone (posted to the message loop).  If the
// job ended in error, informs the delegate through the appropriate channel.
void URLRequestJob::CompleteNotifyDone() {
  // Check if we should notify the delegate that we're done because of an error.
  if (request_ &&
      !request_->status().is_success() &&
      request_->delegate()) {
    // We report the error differently depending on whether we've called
    // OnResponseStarted yet.
    if (has_handled_response_) {
      // We signal the error by calling OnReadComplete with a bytes_read of -1.
      request_->delegate()->OnReadCompleted(request_, -1);
    } else {
      has_handled_response_ = true;
      request_->ResponseStarted();
    }
  }
}
538 
// Completes the job as canceled (net::ERR_ABORTED), unless it already
// finished; NotifyDone tolerates being skipped when done_ is set.
void URLRequestJob::NotifyCanceled() {
  if (!done_) {
    NotifyDone(URLRequestStatus(URLRequestStatus::CANCELED,
                                net::ERR_ABORTED));
  }
}
545 
// Asks the request to restart the job from scratch; only valid before any
// response has been handled, and skipped if the request was canceled.
// NOTE(review): request_ is dereferenced without a NULL check here —
// presumably callers never invoke this after DetachRequest; verify.
void URLRequestJob::NotifyRestartRequired() {
  DCHECK(!has_handled_response_);
  if (GetStatus().status() != URLRequestStatus::CANCELED)
    request_->Restart();
}
551 
FilterHasData()552 bool URLRequestJob::FilterHasData() {
553     return filter_.get() && filter_->stream_data_len();
554 }
555 
// Accounts for |bytes_read| raw bytes: updates profiling metrics, the
// running pre-filter byte count, packet-timing stats, and the job tracker.
void URLRequestJob::RecordBytesRead(int bytes_read) {
  if (is_profiling()) {
    ++(metrics_->number_of_read_IO_);
    metrics_->total_bytes_read_ += bytes_read;
  }
  filter_input_byte_count_ += bytes_read;
  UpdatePacketReadTimes();  // Facilitate stats recording if it is active.
  g_url_request_job_tracker.OnBytesRead(this, bytes_read);
}
565 
// Returns the current status of the owning request, or CANCELED/ERR_ABORTED
// when the request has been detached or destroyed.
const URLRequestStatus URLRequestJob::GetStatus() {
  if (request_)
    return request_->status();
  // If the request is gone, we must be cancelled.
  return URLRequestStatus(URLRequestStatus::CANCELED,
                          net::ERR_ABORTED);
}
573 
// Propagates |status| to the owning request; a no-op after DetachRequest.
void URLRequestJob::SetStatus(const URLRequestStatus &status) {
  if (request_)
    request_->set_status(status);
}
578 
// Approximates per-packet arrival times from cumulative byte counts: each
// kTypicalPacketSize-sized chunk of newly observed bytes is counted as one
// "packet" arriving now.  Timestamps are only retained for the first
// max_packets_timed_ packets.
void URLRequestJob::UpdatePacketReadTimes() {
  if (!packet_timing_enabled_)
    return;

  if (filter_input_byte_count_ <= bytes_observed_in_packets_) {
    DCHECK(filter_input_byte_count_ == bytes_observed_in_packets_);
    return;  // No new bytes have arrived.
  }

  // First bytes of the response: snapshot the request start time as the
  // baseline for duration histograms.
  if (!bytes_observed_in_packets_)
    request_time_snapshot_ = GetRequestTime();

  final_packet_time_ = base::Time::Now();
  const size_t kTypicalPacketSize = 1430;
  while (filter_input_byte_count_ > bytes_observed_in_packets_) {
    ++observed_packet_count_;
    if (max_packets_timed_ > packet_times_.size()) {
      packet_times_.push_back(final_packet_time_);
      DCHECK(static_cast<size_t>(observed_packet_count_) ==
             packet_times_.size());
    }
    bytes_observed_in_packets_ += kTypicalPacketSize;
  }
  // Since packets may not be full, we'll remember the number of bytes we've
  // accounted for in packets thus far.
  bytes_observed_in_packets_ = filter_input_byte_count_;
}
606 
// Turns on packet-arrival-time collection (used for SDCH statistics).  The
// timed-packet limit only ever grows across repeated calls.
void URLRequestJob::EnablePacketCounting(size_t max_packets_timed) {
  if (max_packets_timed_ < max_packets_timed)
    max_packets_timed_ = max_packets_timed;
  packet_timing_enabled_ = true;
}
612 
RecordPacketStats(StatisticSelector statistic) const613 void URLRequestJob::RecordPacketStats(StatisticSelector statistic) const {
614   if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
615     return;
616 
617   // Caller should verify that we're not cached content, but we can't always
618   // really check for it here because we may (at destruction time) call our own
619   // class method and get a bogus const answer of false. This DCHECK only helps
620   // when this method has a valid overridden definition.
621   DCHECK(!IsCachedContent());
622 
623   base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
624   switch (statistic) {
625     case SDCH_DECODE: {
626       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_Latency_F_a", duration,
627                                   base::TimeDelta::FromMilliseconds(20),
628                                   base::TimeDelta::FromMinutes(10), 100);
629       UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Decode_Packets_b",
630                                static_cast<int>(observed_packet_count_));
631       UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
632           static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
633       if (packet_times_.empty())
634         return;
635       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_Last_a",
636                                   final_packet_time_ - packet_times_[0],
637                                   base::TimeDelta::FromMilliseconds(20),
638                                   base::TimeDelta::FromMinutes(10), 100);
639 
640       DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
641       DCHECK(kSdchPacketHistogramCount > 4);
642       if (packet_times_.size() <= 4)
643         return;
644       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_2nd_c",
645                                   packet_times_[1] - packet_times_[0],
646                                   base::TimeDelta::FromMilliseconds(1),
647                                   base::TimeDelta::FromSeconds(10), 100);
648       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_2nd_To_3rd_c",
649                                   packet_times_[2] - packet_times_[1],
650                                   base::TimeDelta::FromMilliseconds(1),
651                                   base::TimeDelta::FromSeconds(10), 100);
652       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_3rd_To_4th_c",
653                                   packet_times_[3] - packet_times_[2],
654                                   base::TimeDelta::FromMilliseconds(1),
655                                   base::TimeDelta::FromSeconds(10), 100);
656       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_4th_To_5th_c",
657                                   packet_times_[4] - packet_times_[3],
658                                   base::TimeDelta::FromMilliseconds(1),
659                                   base::TimeDelta::FromSeconds(10), 100);
660       return;
661     }
662     case SDCH_PASSTHROUGH: {
663       // Despite advertising a dictionary, we handled non-sdch compressed
664       // content.
665       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_Latency_F_a",
666                                   duration,
667                                   base::TimeDelta::FromMilliseconds(20),
668                                   base::TimeDelta::FromMinutes(10), 100);
669       UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Pass-through_Packets_b",
670                                observed_packet_count_);
671       if (packet_times_.empty())
672         return;
673       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_Last_a",
674                                   final_packet_time_ - packet_times_[0],
675                                   base::TimeDelta::FromMilliseconds(20),
676                                   base::TimeDelta::FromMinutes(10), 100);
677       DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
678       DCHECK(kSdchPacketHistogramCount > 4);
679       if (packet_times_.size() <= 4)
680         return;
681       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_2nd_c",
682                                   packet_times_[1] - packet_times_[0],
683                                   base::TimeDelta::FromMilliseconds(1),
684                                   base::TimeDelta::FromSeconds(10), 100);
685       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_2nd_To_3rd_c",
686                                   packet_times_[2] - packet_times_[1],
687                                   base::TimeDelta::FromMilliseconds(1),
688                                   base::TimeDelta::FromSeconds(10), 100);
689       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_3rd_To_4th_c",
690                                   packet_times_[3] - packet_times_[2],
691                                   base::TimeDelta::FromMilliseconds(1),
692                                   base::TimeDelta::FromSeconds(10), 100);
693       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_4th_To_5th_c",
694                                   packet_times_[4] - packet_times_[3],
695                                   base::TimeDelta::FromMilliseconds(1),
696                                   base::TimeDelta::FromSeconds(10), 100);
697       return;
698     }
699 
700     case SDCH_EXPERIMENT_DECODE: {
701       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Decode",
702                                   duration,
703                                   base::TimeDelta::FromMilliseconds(20),
704                                   base::TimeDelta::FromMinutes(10), 100);
705       // We already provided interpacket histograms above in the SDCH_DECODE
706       // case, so we don't need them here.
707       return;
708     }
709     case SDCH_EXPERIMENT_HOLDBACK: {
710       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback",
711                                   duration,
712                                   base::TimeDelta::FromMilliseconds(20),
713                                   base::TimeDelta::FromMinutes(10), 100);
714       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_Last_a",
715                                   final_packet_time_ - packet_times_[0],
716                                   base::TimeDelta::FromMilliseconds(20),
717                                   base::TimeDelta::FromMinutes(10), 100);
718 
719       DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
720       DCHECK(kSdchPacketHistogramCount > 4);
721       if (packet_times_.size() <= 4)
722         return;
723       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_2nd_c",
724                                   packet_times_[1] - packet_times_[0],
725                                   base::TimeDelta::FromMilliseconds(1),
726                                   base::TimeDelta::FromSeconds(10), 100);
727       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_2nd_To_3rd_c",
728                                   packet_times_[2] - packet_times_[1],
729                                   base::TimeDelta::FromMilliseconds(1),
730                                   base::TimeDelta::FromSeconds(10), 100);
731       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_3rd_To_4th_c",
732                                   packet_times_[3] - packet_times_[2],
733                                   base::TimeDelta::FromMilliseconds(1),
734                                   base::TimeDelta::FromSeconds(10), 100);
735       UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_4th_To_5th_c",
736                                   packet_times_[4] - packet_times_[3],
737                                   base::TimeDelta::FromMilliseconds(1),
738                                   base::TimeDelta::FromSeconds(10), 100);
739       return;
740     }
741     default:
742       NOTREACHED();
743       return;
744   }
745 }
746