// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/url_request/url_request_job.h"

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/message_loop/message_loop.h"
#include "base/power_monitor/power_monitor.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "net/base/auth.h"
#include "net/base/host_port_pair.h"
#include "net/base/io_buffer.h"
#include "net/base/load_states.h"
#include "net/base/net_errors.h"
#include "net/base/network_delegate.h"
#include "net/filter/filter.h"
#include "net/http/http_response_headers.h"
#include "net/url_request/url_request.h"

namespace net {

URLRequestJob::URLRequestJob(URLRequest* request,
                             NetworkDelegate* network_delegate)
    : request_(request),
      done_(false),
      prefilter_bytes_read_(0),
      postfilter_bytes_read_(0),
      filter_input_byte_count_(0),
      filter_needs_more_output_space_(false),
      filtered_read_buffer_len_(0),
      has_handled_response_(false),
      expected_content_size_(-1),
      network_delegate_(network_delegate),
      weak_factory_(this) {
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->AddObserver(this);
}

void URLRequestJob::SetUpload(UploadDataStream* upload) {
}

void URLRequestJob::SetExtraRequestHeaders(const HttpRequestHeaders& headers) {
}

void URLRequestJob::SetPriority(RequestPriority priority) {
}

void URLRequestJob::Kill() {
  weak_factory_.InvalidateWeakPtrs();
  // Make sure the request is notified that we are done. We assume that the
  // request took care of setting its error status before calling Kill.
  if (request_)
    NotifyCanceled();
}

void URLRequestJob::DetachRequest() {
  request_ = NULL;
}

// This function calls ReadRawData to get stream data. If a filter exists, it
// passes the data to the attached filter and then returns the filter's output
// to the caller.
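// Returning true with *bytes_read set to 0 indicates end-of-stream and
// triggers the NotifyDone() call at the end of this function.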
bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) {
  bool rv = false;

  DCHECK_LT(buf_size, 1000000);  // Sanity check.
  DCHECK(buf);
  DCHECK(bytes_read);
  DCHECK(filtered_read_buffer_.get() == NULL);
  DCHECK_EQ(0, filtered_read_buffer_len_);

  *bytes_read = 0;

  // Skip Filter if not present.
  if (!filter_.get()) {
    rv = ReadRawDataHelper(buf, buf_size, bytes_read);
  } else {
    // Save the caller's buffers while we do IO
    // in the filter's buffers.
    filtered_read_buffer_ = buf;
    filtered_read_buffer_len_ = buf_size;

    if (ReadFilteredData(bytes_read)) {
      rv = true;  // We have data to return.

      // It is fine to call DoneReading even if ReadFilteredData receives 0
      // bytes from the net, but we avoid making that call if we know for
      // sure that's the case (ReadRawDataHelper path).
      if (*bytes_read == 0)
        DoneReading();
    } else {
      rv = false;  // Error, or a new IO is pending.
    }
  }
  if (rv && *bytes_read == 0)
    NotifyDone(URLRequestStatus());
  return rv;
}

void URLRequestJob::StopCaching() {
  // Nothing to do here.
}

bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const {
  // Most job types don't send request headers.
  return false;
}

int64 URLRequestJob::GetTotalReceivedBytes() const {
  return 0;
}

LoadState URLRequestJob::GetLoadState() const {
  return LOAD_STATE_IDLE;
}

UploadProgress URLRequestJob::GetUploadProgress() const {
  return UploadProgress();
}

bool URLRequestJob::GetCharset(std::string* charset) {
  return false;
}

void URLRequestJob::GetResponseInfo(HttpResponseInfo* info) {
}

void URLRequestJob::GetLoadTimingInfo(LoadTimingInfo* load_timing_info) const {
  // Only certain request types return more than just request start times.
}

bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
  return false;
}

Filter* URLRequestJob::SetupFilter() const {
  return NULL;
}

bool URLRequestJob::IsRedirectResponse(GURL* location,
                                       int* http_status_code) {
  // For non-HTTP jobs, headers will be null.
  HttpResponseHeaders* headers = request_->response_headers();
  if (!headers)
    return false;

  std::string value;
  if (!headers->IsRedirect(&value))
    return false;

  *location = request_->url().Resolve(value);
  *http_status_code = headers->response_code();
  return true;
}

bool URLRequestJob::CopyFragmentOnRedirect(const GURL& location) const {
  return true;
}

bool URLRequestJob::IsSafeRedirect(const GURL& location) {
  return true;
}

bool URLRequestJob::NeedsAuth() {
  return false;
}

void URLRequestJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* auth_info) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::SetAuth(const AuthCredentials& credentials) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::CancelAuth() {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  // The derived class should implement this!
  NOTREACHED();
}

void URLRequestJob::ContinueDespiteLastError() {
  // Implementations should know how to recover from errors they generate.
  // If this code was reached, we are trying to recover from an error that
  // we don't know how to recover from.
  NOTREACHED();
}

void URLRequestJob::FollowDeferredRedirect() {
  DCHECK_NE(-1, deferred_redirect_info_.status_code);

  // NOTE: deferred_redirect_info_ may be invalid, and attempting to follow it
  // will fail inside FollowRedirect. The DCHECK above asserts that we called
  // OnReceivedRedirect.

  // It is also possible that FollowRedirect will drop the last reference to
  // this job, so we need to reset our members before calling it.

  RedirectInfo redirect_info = deferred_redirect_info_;
  deferred_redirect_info_ = RedirectInfo();
  FollowRedirect(redirect_info);
}

void URLRequestJob::ResumeNetworkStart() {
  // This should only be called for HTTP Jobs, and implemented in the derived
  // class.
  NOTREACHED();
}

bool URLRequestJob::GetMimeType(std::string* mime_type) const {
  return false;
}

int URLRequestJob::GetResponseCode() const {
  return -1;
}

HostPortPair URLRequestJob::GetSocketAddress() const {
  return HostPortPair();
}

void URLRequestJob::OnSuspend() {
  Kill();
}

void URLRequestJob::NotifyURLRequestDestroyed() {
}

URLRequestJob::~URLRequestJob() {
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->RemoveObserver(this);
}

void URLRequestJob::NotifyCertificateRequested(
    SSLCertRequestInfo* cert_request_info) {
  if (!request_)
    return;  // The request was destroyed, so there is no more work to do.

  request_->NotifyCertificateRequested(cert_request_info);
}

void URLRequestJob::NotifySSLCertificateError(const SSLInfo& ssl_info,
                                              bool fatal) {
  if (!request_)
    return;  // The request was destroyed, so there is no more work to do.

  request_->NotifySSLCertificateError(ssl_info, fatal);
}

bool URLRequestJob::CanGetCookies(const CookieList& cookie_list) const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanGetCookies(cookie_list);
}

bool URLRequestJob::CanSetCookie(const std::string& cookie_line,
                                 CookieOptions* options) const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanSetCookie(cookie_line, options);
}

bool URLRequestJob::CanEnablePrivacyMode() const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanEnablePrivacyMode();
}

CookieStore* URLRequestJob::GetCookieStore() const {
  DCHECK(request_);

  return request_->cookie_store();
}

void URLRequestJob::NotifyBeforeNetworkStart(bool* defer) {
  if (!request_)
    return;

  request_->NotifyBeforeNetworkStart(defer);
}

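// Called by a subclass once response headers are available. Handles redirect
// and auth-challenge responses before telling the URLRequest that the response
// has started.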
void URLRequestJob::NotifyHeadersComplete() {
  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  if (has_handled_response_)
    return;

  DCHECK(!request_->status().is_io_pending());

  // Initialize to the current time, and let the subclass optionally override
  // the time stamps if it has that information. The default request_time is
  // set by URLRequest before it calls our Start method.
  request_->response_info_.response_time = base::Time::Now();
  GetResponseInfo(&request_->response_info_);

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this'). After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed. self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (request_)
    request_->OnHeadersComplete();

  GURL new_location;
  int http_status_code;
  if (IsRedirectResponse(&new_location, &http_status_code)) {
    // Redirect response bodies are not read. Notify the transaction
    // so it does not treat being stopped as an error.
    DoneReadingRedirectResponse();

    RedirectInfo redirect_info =
        ComputeRedirectInfo(new_location, http_status_code);

    bool defer_redirect = false;
    request_->NotifyReceivedRedirect(redirect_info, &defer_redirect);

    // Ensure that the request wasn't detached or destroyed in
    // NotifyReceivedRedirect.
    if (!request_ || !request_->has_delegate())
      return;

    // If we were not cancelled, then maybe follow the redirect.
    if (request_->status().is_success()) {
      if (defer_redirect) {
        deferred_redirect_info_ = redirect_info;
      } else {
        FollowRedirect(redirect_info);
      }
      return;
    }
  } else if (NeedsAuth()) {
    scoped_refptr<AuthChallengeInfo> auth_info;
    GetAuthChallengeInfo(&auth_info);
    // Need to check for a NULL auth_info because the server may have failed
    // to send a challenge with the 401 response.
    if (auth_info.get()) {
      request_->NotifyAuthRequired(auth_info.get());
      // Wait for SetAuth or CancelAuth to be called.
      return;
    }
  }

  has_handled_response_ = true;
  if (request_->status().is_success())
    filter_.reset(SetupFilter());

  if (!filter_.get()) {
    std::string content_length;
    request_->GetResponseHeaderByName("content-length", &content_length);
    if (!content_length.empty())
      base::StringToInt64(content_length, &expected_content_size_);
  }

  request_->NotifyResponseStarted();
}

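// Called when a raw read completes, possibly asynchronously. Accounts for the
// bytes, runs them through the filter when one is attached, and forwards the
// result to the URLRequest's delegate.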
void URLRequestJob::NotifyReadComplete(int bytes_read) {
  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome
  // unit_tests have been fixed to not trip this.
#if 0
  DCHECK(!request_->status().is_io_pending());
#endif
  // The headers should be complete before reads complete
  DCHECK(has_handled_response_);

  OnRawReadComplete(bytes_read);

  // Don't notify if we had an error.
  if (!request_->status().is_success())
    return;

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this'). After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed. self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (filter_.get()) {
    // Tell the filter that it has more data
    FilteredDataRead(bytes_read);

    // Filter the data.
    int filter_bytes_read = 0;
    if (ReadFilteredData(&filter_bytes_read)) {
      if (!filter_bytes_read)
        DoneReading();
      request_->NotifyReadCompleted(filter_bytes_read);
    }
  } else {
    request_->NotifyReadCompleted(bytes_read);
  }
  DVLOG(1) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
}

void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
  DCHECK(!has_handled_response_);
  has_handled_response_ = true;
  if (request_) {
    // There may be relevant information in the response info even in the
    // error case.
    GetResponseInfo(&request_->response_info_);

    request_->set_status(status);
    request_->NotifyResponseStarted();
    // We may have been deleted.
  }
}

void URLRequestJob::NotifyDone(const URLRequestStatus &status) {
  DCHECK(!done_) << "Job sending done notification twice";
  if (done_)
    return;
  done_ = true;

  // Unless there was an error, we should have at least tried to handle
  // the response before getting here.
  DCHECK(has_handled_response_ || !status.is_success());

  // As with NotifyReadComplete, we need to take care to notice if we were
  // destroyed during a delegate callback.
  if (request_) {
    request_->set_is_pending(false);
    // With async IO, it's quite possible to have a few outstanding
    // requests. We could receive a request to Cancel, followed shortly
    // by a successful IO. For tracking the status(), once there is
    // an error, we do not change the status back to success. To
    // enforce this, only set the status if the job is so far
    // successful.
    if (request_->status().is_success()) {
      if (status.status() == URLRequestStatus::FAILED) {
        request_->net_log().AddEventWithNetErrorCode(NetLog::TYPE_FAILED,
                                                     status.error());
      }
      request_->set_status(status);
    }
  }

  // Complete this notification later. This prevents us from re-entering the
  // delegate if we're done because of a synchronous call.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestJob::CompleteNotifyDone,
                 weak_factory_.GetWeakPtr()));
}

void URLRequestJob::CompleteNotifyDone() {
  // Check if we should notify the delegate that we're done because of an
  // error.
  if (request_ &&
      !request_->status().is_success() &&
      request_->has_delegate()) {
    // We report the error differently depending on whether we've called
    // OnResponseStarted yet.
    if (has_handled_response_) {
      // We signal the error by calling OnReadComplete with a bytes_read of -1.
      request_->NotifyReadCompleted(-1);
    } else {
      has_handled_response_ = true;
      request_->NotifyResponseStarted();
    }
  }
}

void URLRequestJob::NotifyCanceled() {
  if (!done_) {
    NotifyDone(URLRequestStatus(URLRequestStatus::CANCELED, ERR_ABORTED));
  }
}

void URLRequestJob::NotifyRestartRequired() {
  DCHECK(!has_handled_response_);
  if (GetStatus().status() != URLRequestStatus::CANCELED)
    request_->Restart();
}

void URLRequestJob::OnCallToDelegate() {
  request_->OnCallToDelegate();
}

void URLRequestJob::OnCallToDelegateComplete() {
  request_->OnCallToDelegateComplete();
}

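// The default implementation reports an immediate end of stream: success with
// zero bytes read. Subclasses that produce response bytes override this; as
// ReadRawDataHelper() below shows, a read that cannot complete synchronously
// leaves the request status as IO_PENDING and is reported later through
// NotifyReadComplete().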
bool URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size,
                                int *bytes_read) {
  DCHECK(bytes_read);
  *bytes_read = 0;
  return true;
}

void URLRequestJob::DoneReading() {
  // Do nothing.
}

void URLRequestJob::DoneReadingRedirectResponse() {
}

void URLRequestJob::FilteredDataRead(int bytes_read) {
  DCHECK(filter_);
  filter_->FlushStreamBuffer(bytes_read);
}

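// Drains the attached filter into the caller's buffer. When the filter runs
// out of input, more raw data is pulled from the job via ReadRawDataForFilter()
// and pushed into the filter, looping until output is produced, the read goes
// asynchronous, or the stream ends.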
bool URLRequestJob::ReadFilteredData(int* bytes_read) {
  DCHECK(filter_);
  DCHECK(filtered_read_buffer_.get());
  DCHECK_GT(filtered_read_buffer_len_, 0);
  DCHECK_LT(filtered_read_buffer_len_, 1000000);  // Sanity check.
  DCHECK(!raw_read_buffer_.get());

  *bytes_read = 0;
  bool rv = false;

  for (;;) {
    if (is_done())
      return true;

    if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
      // We don't have any raw data to work with, so read from the transaction.
      int filtered_data_read;
      if (ReadRawDataForFilter(&filtered_data_read)) {
        if (filtered_data_read > 0) {
          // Give data to filter.
          filter_->FlushStreamBuffer(filtered_data_read);
        } else {
          return true;  // EOF.
        }
      } else {
        return false;  // IO Pending (or error).
      }
    }

    if ((filter_->stream_data_len() || filter_needs_more_output_space_) &&
        !is_done()) {
      // Get filtered data.
      int filtered_data_len = filtered_read_buffer_len_;
      int output_buffer_size = filtered_data_len;
      Filter::FilterStatus status =
          filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len);

      if (filter_needs_more_output_space_ && !filtered_data_len) {
        // filter_needs_more_output_space_ was mistaken... there are no more
        // bytes and we should have at least tried to fill up the filter's
        // input buffer. Correct the state, and try again.
        filter_needs_more_output_space_ = false;
        continue;
      }
      filter_needs_more_output_space_ =
          (filtered_data_len == output_buffer_size);

      switch (status) {
        case Filter::FILTER_DONE: {
          filter_needs_more_output_space_ = false;
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
          break;
        }
        case Filter::FILTER_NEED_MORE_DATA: {
          // We have finished filtering all data currently in the buffer.
          // There might be some space left in the output buffer. One can
          // consider reading more data from the stream to feed the filter
          // and filling up the output buffer. This leads to more complicated
          // buffer management and data notification mechanisms.
          // We can revisit this issue if there is a real perf need.
          if (filtered_data_len > 0) {
            *bytes_read = filtered_data_len;
            postfilter_bytes_read_ += filtered_data_len;
            rv = true;
          } else {
            // Read again since we haven't received enough data yet (e.g., we
            // may not have a complete gzip header yet).
            continue;
          }
          break;
        }
        case Filter::FILTER_OK: {
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
          break;
        }
        case Filter::FILTER_ERROR: {
          DVLOG(1) << __FUNCTION__ << "() "
                   << "\"" << (request_ ? request_->url().spec() : "???")
                   << "\"" << " Filter Error";
          filter_needs_more_output_space_ = false;
          NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
                                      ERR_CONTENT_DECODING_FAILED));
          rv = false;
          break;
        }
        default: {
          NOTREACHED();
          filter_needs_more_output_space_ = false;
          rv = false;
          break;
        }
      }

      // If logging all bytes is enabled, log the filtered bytes read.
      if (rv && request() && request()->net_log().IsLoggingBytes() &&
          filtered_data_len > 0) {
        request()->net_log().AddByteTransferEvent(
            NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ,
            filtered_data_len, filtered_read_buffer_->data());
      }
    } else {
      // We are done, or there is no data left.
      rv = true;
    }
    break;
  }

  if (rv) {
    // When we successfully finished a read, we no longer need to save the
    // caller's buffers. Release our reference.
    filtered_read_buffer_ = NULL;
    filtered_read_buffer_len_ = 0;
  }
  return rv;
}

void URLRequestJob::DestroyFilters() {
  filter_.reset();
}

const URLRequestStatus URLRequestJob::GetStatus() {
  if (request_)
    return request_->status();
  // If the request is gone, we must be cancelled.
  return URLRequestStatus(URLRequestStatus::CANCELED,
                          ERR_ABORTED);
}

void URLRequestJob::SetStatus(const URLRequestStatus &status) {
  if (request_)
    request_->set_status(status);
}

void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) {
  request_->proxy_server_ = proxy_server;
}

bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
  bool rv = false;

  DCHECK(bytes_read);
  DCHECK(filter_.get());

  *bytes_read = 0;

  // Get more pre-filtered data if needed.
  // TODO(mbelshe): is it possible that the filter needs *MORE* data
  //    when there is some data already in the buffer?
  if (!filter_->stream_data_len() && !is_done()) {
    IOBuffer* stream_buffer = filter_->stream_buffer();
    int stream_buffer_size = filter_->stream_buffer_size();
    rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
  }
  return rv;
}

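// Wraps ReadRawData(), holding a reference to the buffer in raw_read_buffer_
// so the bytes can be logged and counted in OnRawReadComplete() whether the
// read finishes synchronously or asynchronously.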
bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf, int buf_size,
                                      int* bytes_read) {
  DCHECK(!request_->status().is_io_pending());
  DCHECK(raw_read_buffer_.get() == NULL);

  // Keep a pointer to the read buffer, so we have access to it in the
  // OnRawReadComplete() callback in the event that the read completes
  // asynchronously.
  raw_read_buffer_ = buf;
  bool rv = ReadRawData(buf, buf_size, bytes_read);

  if (!request_->status().is_io_pending()) {
    // If the read completes synchronously, either success or failure,
    // invoke the OnRawReadComplete callback so we can account for the
    // completed read.
    OnRawReadComplete(*bytes_read);
  }
  return rv;
}

void URLRequestJob::FollowRedirect(const RedirectInfo& redirect_info) {
  int rv = request_->Redirect(redirect_info);
  if (rv != OK)
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
}

void URLRequestJob::OnRawReadComplete(int bytes_read) {
  DCHECK(raw_read_buffer_.get());
  // If |filter_| is non-NULL, bytes will be logged after it is applied instead.
  if (!filter_.get() && request() && request()->net_log().IsLoggingBytes() &&
      bytes_read > 0) {
    request()->net_log().AddByteTransferEvent(
        NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ,
        bytes_read, raw_read_buffer_->data());
  }

  if (bytes_read > 0) {
    RecordBytesRead(bytes_read);
  }
  raw_read_buffer_ = NULL;
}

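// Updates the pre-filter (and, when no filter is attached, post-filter) byte
// counters and informs the NetworkDelegate that raw bytes were read.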
void URLRequestJob::RecordBytesRead(int bytes_read) {
  filter_input_byte_count_ += bytes_read;
  prefilter_bytes_read_ += bytes_read;
  if (!filter_.get())
    postfilter_bytes_read_ += bytes_read;
  DVLOG(2) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
  UpdatePacketReadTimes();  // Facilitate stats recording if it is active.
  if (network_delegate_)
    network_delegate_->NotifyRawBytesRead(*request_, bytes_read);
}

bool URLRequestJob::FilterHasData() {
  return filter_.get() && filter_->stream_data_len();
}

void URLRequestJob::UpdatePacketReadTimes() {
}

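// Builds the RedirectInfo for a redirect response: picks the new method based
// on the status code, carries over the URL fragment when appropriate, and
// updates the first-party-for-cookies URL and referrer according to the
// request's policies.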
RedirectInfo URLRequestJob::ComputeRedirectInfo(const GURL& location,
                                                int http_status_code) {
  const GURL& url = request_->url();

  RedirectInfo redirect_info;

  redirect_info.status_code = http_status_code;

  // The request method may change, depending on the status code.
  redirect_info.new_method = URLRequest::ComputeMethodForRedirect(
      request_->method(), http_status_code);

  // Move the reference fragment of the old location to the new one if the
  // new one has none. This duplicates mozilla's behavior.
  if (url.is_valid() && url.has_ref() && !location.has_ref() &&
      CopyFragmentOnRedirect(location)) {
    GURL::Replacements replacements;
    // Reference the |ref| directly out of the original URL to avoid a
    // malloc.
    replacements.SetRef(url.spec().data(),
                        url.parsed_for_possibly_invalid_spec().ref);
    redirect_info.new_url = location.ReplaceComponents(replacements);
  } else {
    redirect_info.new_url = location;
  }

  // Update the first-party URL if appropriate.
  if (request_->first_party_url_policy() ==
          URLRequest::UPDATE_FIRST_PARTY_URL_ON_REDIRECT) {
    redirect_info.new_first_party_for_cookies = redirect_info.new_url;
  } else {
    redirect_info.new_first_party_for_cookies =
        request_->first_party_for_cookies();
  }

  // Suppress the referrer if we're redirecting out of https.
  if (request_->referrer_policy() ==
          URLRequest::CLEAR_REFERRER_ON_TRANSITION_FROM_SECURE_TO_INSECURE &&
      GURL(request_->referrer()).SchemeIsSecure() &&
      !redirect_info.new_url.SchemeIsSecure()) {
    redirect_info.new_referrer.clear();
  } else {
    redirect_info.new_referrer = request_->referrer();
  }

  return redirect_info;
}

}  // namespace net