1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "net/url_request/url_request_job.h"
6
7 #include "base/bind.h"
8 #include "base/compiler_specific.h"
9 #include "base/message_loop/message_loop.h"
10 #include "base/power_monitor/power_monitor.h"
11 #include "base/strings/string_number_conversions.h"
12 #include "base/strings/string_util.h"
13 #include "net/base/auth.h"
14 #include "net/base/host_port_pair.h"
15 #include "net/base/io_buffer.h"
16 #include "net/base/load_states.h"
17 #include "net/base/net_errors.h"
18 #include "net/base/network_delegate.h"
19 #include "net/filter/filter.h"
20 #include "net/http/http_response_headers.h"
21 #include "net/url_request/url_request.h"
22
23 namespace net {
24
// Constructs a job bound to |request|. |network_delegate| may be NULL; it is
// only consulted for raw-byte accounting (see RecordBytesRead()). Neither
// pointer is owned by the job.
URLRequestJob::URLRequestJob(URLRequest* request,
                             NetworkDelegate* network_delegate)
    : request_(request),
      done_(false),
      prefilter_bytes_read_(0),
      postfilter_bytes_read_(0),
      filter_input_byte_count_(0),
      filter_needs_more_output_space_(false),
      filtered_read_buffer_len_(0),
      has_handled_response_(false),
      expected_content_size_(-1),
      deferred_redirect_status_code_(-1),
      network_delegate_(network_delegate),
      weak_factory_(this) {
  // Observe power events so the job can be killed on suspend (see
  // OnSuspend()). PowerMonitor may be absent (e.g. in some tests).
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->AddObserver(this);
}
43
// Default no-op: only jobs that send a request body override this.
void URLRequestJob::SetUpload(UploadDataStream* upload) {
}
46
// Default no-op: only jobs that transmit request headers override this.
void URLRequestJob::SetExtraRequestHeaders(const HttpRequestHeaders& headers) {
}
49
// Default no-op: jobs that schedule network work override to honor priority.
void URLRequestJob::SetPriority(RequestPriority priority) {
}
52
// Aborts the job. Invalidating weak pointers cancels any pending posted
// callbacks (e.g. the CompleteNotifyDone task posted by NotifyDone) so they
// cannot run after the kill.
void URLRequestJob::Kill() {
  weak_factory_.InvalidateWeakPtrs();
  // Make sure the request is notified that we are done. We assume that the
  // request took care of setting its error status before calling Kill.
  if (request_)
    NotifyCanceled();
}
60
// Severs the back-pointer to the request. After this, all request_-guarded
// notification paths in this class become no-ops.
void URLRequestJob::DetachRequest() {
  request_ = NULL;
}
64
// This function calls ReadData to get stream data. If a filter exists, passes
// the data to the attached filter. Then returns the output from filter back to
// the caller.
// Returns true with *bytes_read > 0 for data, true with *bytes_read == 0 for
// EOF, and false for error or pending async IO.
bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) {
  bool rv = false;

  DCHECK_LT(buf_size, 1000000);  // Sanity check.
  DCHECK(buf);
  DCHECK(bytes_read);
  // A previous filtered read must have completed and released the buffer.
  DCHECK(filtered_read_buffer_.get() == NULL);
  DCHECK_EQ(0, filtered_read_buffer_len_);

  *bytes_read = 0;

  // Skip Filter if not present.
  if (!filter_.get()) {
    rv = ReadRawDataHelper(buf, buf_size, bytes_read);
  } else {
    // Save the caller's buffers while we do IO
    // in the filter's buffers.
    filtered_read_buffer_ = buf;
    filtered_read_buffer_len_ = buf_size;

    if (ReadFilteredData(bytes_read)) {
      rv = true;  // We have data to return.

      // It is fine to call DoneReading even if ReadFilteredData receives 0
      // bytes from the net, but we avoid making that call if we know for
      // sure that's the case (ReadRawDataHelper path).
      if (*bytes_read == 0)
        DoneReading();
    } else {
      rv = false;  // Error, or a new IO is pending.
    }
  }
  // Synchronous EOF: report overall success to the request.
  if (rv && *bytes_read == 0)
    NotifyDone(URLRequestStatus());
  return rv;
}
104
// Default no-op: only cache-backed jobs override this.
void URLRequestJob::StopCaching() {
  // Nothing to do here.
}
108
// Returns false by default; |headers| is left untouched.
bool URLRequestJob::GetFullRequestHeaders(HttpRequestHeaders* headers) const {
  // Most job types don't send request headers.
  return false;
}
113
// Default: no byte accounting; jobs that track network bytes override.
int64 URLRequestJob::GetTotalReceivedBytes() const {
  return 0;
}
117
// Default load state; network-backed jobs report finer-grained states.
LoadState URLRequestJob::GetLoadState() const {
  return LOAD_STATE_IDLE;
}
121
// Default: no upload, so a default-constructed (empty) progress.
UploadProgress URLRequestJob::GetUploadProgress() const {
  return UploadProgress();
}
125
// Default: no charset known; |charset| is left untouched.
bool URLRequestJob::GetCharset(std::string* charset) {
  return false;
}
129
// Default no-op: jobs with response metadata (e.g. HTTP) fill in |info|.
void URLRequestJob::GetResponseInfo(HttpResponseInfo* info) {
}
132
// Default no-op.
void URLRequestJob::GetLoadTimingInfo(LoadTimingInfo* load_timing_info) const {
  // Only certain request types return more than just request start times.
}
136
// Default: no cookies; |cookies| is left untouched.
bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {
  return false;
}
140
// Default: no content decoding filter. Subclasses return a heap-allocated
// Filter chain that the caller (NotifyHeadersComplete) takes ownership of.
Filter* URLRequestJob::SetupFilter() const {
  return NULL;
}
144
IsRedirectResponse(GURL * location,int * http_status_code)145 bool URLRequestJob::IsRedirectResponse(GURL* location,
146 int* http_status_code) {
147 // For non-HTTP jobs, headers will be null.
148 HttpResponseHeaders* headers = request_->response_headers();
149 if (!headers)
150 return false;
151
152 std::string value;
153 if (!headers->IsRedirect(&value))
154 return false;
155
156 *location = request_->url().Resolve(value);
157 *http_status_code = headers->response_code();
158 return true;
159 }
160
// Default: preserve the URL fragment across redirects (see
// NotifyHeadersComplete's fragment-copying logic).
bool URLRequestJob::CopyFragmentOnRedirect(const GURL& location) const {
  return true;
}
164
// Default: all redirects are considered safe; scheme-restricted jobs override.
bool URLRequestJob::IsSafeRedirect(const GURL& location) {
  return true;
}
168
// Default: no authentication required.
bool URLRequestJob::NeedsAuth() {
  return false;
}
172
void URLRequestJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* auth_info) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}
179
void URLRequestJob::SetAuth(const AuthCredentials& credentials) {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}
185
void URLRequestJob::CancelAuth() {
  // This will only be called if NeedsAuth() returns true, in which
  // case the derived class should implement this!
  NOTREACHED();
}
191
// Only reachable after a client-certificate request, which only SSL-capable
// jobs can issue.
void URLRequestJob::ContinueWithCertificate(
    X509Certificate* client_cert) {
  // The derived class should implement this!
  NOTREACHED();
}
197
void URLRequestJob::ContinueDespiteLastError() {
  // Implementations should know how to recover from errors they generate.
  // If this code was reached, we are trying to recover from an error that
  // we don't know how to recover from.
  NOTREACHED();
}
204
// Resumes a redirect that was deferred in NotifyHeadersComplete (when the
// delegate asked to defer). Must only be called after such a deferral.
void URLRequestJob::FollowDeferredRedirect() {
  DCHECK(deferred_redirect_status_code_ != -1);

  // NOTE: deferred_redirect_url_ may be invalid, and attempting to redirect to
  // such an URL will fail inside FollowRedirect.  The DCHECK above asserts
  // that we called OnReceivedRedirect.

  // It is also possible that FollowRedirect will drop the last reference to
  // this job, so we need to reset our members before calling it.

  GURL redirect_url = deferred_redirect_url_;
  int redirect_status_code = deferred_redirect_status_code_;

  // Clear the deferred state *before* FollowRedirect, which may delete |this|.
  deferred_redirect_url_ = GURL();
  deferred_redirect_status_code_ = -1;

  FollowRedirect(redirect_url, redirect_status_code);
}
223
void URLRequestJob::ResumeNetworkStart() {
  // This should only be called for HTTP Jobs, and implemented in the derived
  // class.
  NOTREACHED();
}
229
// Default: MIME type unknown; |mime_type| is left untouched.
bool URLRequestJob::GetMimeType(std::string* mime_type) const {
  return false;
}
233
// Default: -1 signals "no HTTP-style response code".
int URLRequestJob::GetResponseCode() const {
  return -1;
}
237
// Default: no remote endpoint; returns an empty HostPortPair.
HostPortPair URLRequestJob::GetSocketAddress() const {
  return HostPortPair();
}
241
// PowerObserver callback: abort in-flight work when the system suspends.
void URLRequestJob::OnSuspend() {
  Kill();
}
245
// Default no-op hook invoked when the owning URLRequest is being destroyed.
void URLRequestJob::NotifyURLRequestDestroyed() {
}
248
// Mirror of the constructor: unregister from power notifications.
URLRequestJob::~URLRequestJob() {
  base::PowerMonitor* power_monitor = base::PowerMonitor::Get();
  if (power_monitor)
    power_monitor->RemoveObserver(this);
}
254
// Forwards a client-certificate request to the URLRequest, if still attached.
void URLRequestJob::NotifyCertificateRequested(
    SSLCertRequestInfo* cert_request_info) {
  if (!request_)
    return;  // The request was destroyed, so there is no more work to do.

  request_->NotifyCertificateRequested(cert_request_info);
}
262
// Forwards an SSL certificate error to the URLRequest, if still attached.
void URLRequestJob::NotifySSLCertificateError(const SSLInfo& ssl_info,
                                              bool fatal) {
  if (!request_)
    return;  // The request was destroyed, so there is no more work to do.

  request_->NotifySSLCertificateError(ssl_info, fatal);
}
270
// Delegates the cookie-read policy check to the URLRequest; denies if the
// request is gone.
bool URLRequestJob::CanGetCookies(const CookieList& cookie_list) const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanGetCookies(cookie_list);
}
277
// Delegates the cookie-write policy check to the URLRequest; denies if the
// request is gone.
bool URLRequestJob::CanSetCookie(const std::string& cookie_line,
                                 CookieOptions* options) const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanSetCookie(cookie_line, options);
}
285
// Delegates the privacy-mode check to the URLRequest; false if it is gone.
bool URLRequestJob::CanEnablePrivacyMode() const {
  if (!request_)
    return false;  // The request was destroyed, so there is no more work to do.

  return request_->CanEnablePrivacyMode();
}
292
// Returns the request's cookie store. Unlike the Can* checks above, callers
// must not invoke this after the request has been detached/destroyed.
CookieStore* URLRequestJob::GetCookieStore() const {
  DCHECK(request_);

  return request_->cookie_store();
}
298
// Tells the request the network is about to be used; the delegate may set
// *defer to postpone the start (resumed later via ResumeNetworkStart()).
void URLRequestJob::NotifyBeforeNetworkStart(bool* defer) {
  if (!request_)
    return;

  request_->NotifyBeforeNetworkStart(defer);
}
305
// Central post-headers dispatch: handles redirects (with optional deferral),
// auth challenges, filter setup, and finally the response-started callback.
// Delegate callbacks made here can destroy the request or drop the last
// reference to |this|, hence the re-checks and self-preservation below.
void URLRequestJob::NotifyHeadersComplete() {
  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  // Deliver the headers-complete notification at most once.
  if (has_handled_response_)
    return;

  DCHECK(!request_->status().is_io_pending());

  // Initialize to the current time, and let the subclass optionally override
  // the time stamps if it has that information.  The default request_time is
  // set by URLRequest before it calls our Start method.
  request_->response_info_.response_time = base::Time::Now();
  GetResponseInfo(&request_->response_info_);

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this').  After calling to the delgate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed.  self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (request_)
    request_->OnHeadersComplete();

  GURL new_location;
  int http_status_code;
  if (IsRedirectResponse(&new_location, &http_status_code)) {
    // Redirect response bodies are not read. Notify the transaction
    // so it does not treat being stopped as an error.
    DoneReadingRedirectResponse();

    const GURL& url = request_->url();

    // Move the reference fragment of the old location to the new one if the
    // new one has none. This duplicates mozilla's behavior.
    if (url.is_valid() && url.has_ref() && !new_location.has_ref() &&
        CopyFragmentOnRedirect(new_location)) {
      GURL::Replacements replacements;
      // Reference the |ref| directly out of the original URL to avoid a
      // malloc.
      replacements.SetRef(url.spec().data(),
                          url.parsed_for_possibly_invalid_spec().ref);
      new_location = new_location.ReplaceComponents(replacements);
    }

    bool defer_redirect = false;
    request_->NotifyReceivedRedirect(new_location, &defer_redirect);

    // Ensure that the request wasn't detached or destroyed in
    // NotifyReceivedRedirect
    if (!request_ || !request_->has_delegate())
      return;

    // If we were not cancelled, then maybe follow the redirect.
    if (request_->status().is_success()) {
      if (defer_redirect) {
        // Stash the redirect; FollowDeferredRedirect() resumes it later.
        deferred_redirect_url_ = new_location;
        deferred_redirect_status_code_ = http_status_code;
      } else {
        FollowRedirect(new_location, http_status_code);
      }
      return;
    }
  } else if (NeedsAuth()) {
    scoped_refptr<AuthChallengeInfo> auth_info;
    GetAuthChallengeInfo(&auth_info);
    // Need to check for a NULL auth_info because the server may have failed
    // to send a challenge with the 401 response.
    if (auth_info.get()) {
      request_->NotifyAuthRequired(auth_info.get());
      // Wait for SetAuth or CancelAuth to be called.
      return;
    }
  }

  has_handled_response_ = true;
  if (request_->status().is_success())
    filter_.reset(SetupFilter());

  // With no filter, the Content-Length header (if any) gives the expected
  // body size; with a filter the decoded size is unknowable up front.
  if (!filter_.get()) {
    std::string content_length;
    request_->GetResponseHeaderByName("content-length", &content_length);
    if (!content_length.empty())
      base::StringToInt64(content_length, &expected_content_size_);
  }

  request_->NotifyResponseStarted();
}
395
// Called by subclasses when an asynchronous raw read finishes. Accounts for
// the raw bytes, then (optionally through the filter) forwards the completed
// read to the request's delegate.
void URLRequestJob::NotifyReadComplete(int bytes_read) {
  if (!request_ || !request_->has_delegate())
    return;  // The request was destroyed, so there is no more work to do.

  // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome
  // unit_tests have been fixed to not trip this.
#if 0
  DCHECK(!request_->status().is_io_pending());
#endif
  // The headers should be complete before reads complete
  DCHECK(has_handled_response_);

  OnRawReadComplete(bytes_read);

  // Don't notify if we had an error.
  if (!request_->status().is_success())
    return;

  // When notifying the delegate, the delegate can release the request
  // (and thus release 'this').  After calling to the delegate, we must
  // check the request pointer to see if it still exists, and return
  // immediately if it has been destroyed.  self_preservation ensures our
  // survival until we can get out of this method.
  scoped_refptr<URLRequestJob> self_preservation(this);

  if (filter_.get()) {
    // Tell the filter that it has more data
    FilteredDataRead(bytes_read);

    // Filter the data.
    int filter_bytes_read = 0;
    if (ReadFilteredData(&filter_bytes_read)) {
      // Zero filtered bytes with success means EOF of the decoded stream.
      if (!filter_bytes_read)
        DoneReading();
      request_->NotifyReadCompleted(filter_bytes_read);
    }
  } else {
    request_->NotifyReadCompleted(bytes_read);
  }
  DVLOG(1) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
}
441
// Reports a failure that occurred before any response was handled. Marks the
// response as handled so NotifyHeadersComplete cannot fire afterwards.
void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
  DCHECK(!has_handled_response_);
  has_handled_response_ = true;
  if (request_) {
    // There may be relevant information in the response info even in the
    // error case.
    GetResponseInfo(&request_->response_info_);

    request_->set_status(status);
    request_->NotifyResponseStarted();
    // We may have been deleted.
  }
}
455
// Marks the job done (at most once) and schedules the delegate notification
// asynchronously to avoid re-entering the delegate from a synchronous path.
void URLRequestJob::NotifyDone(const URLRequestStatus &status) {
  // DCHECK flags double-notification in debug builds; the early return keeps
  // release builds safe if it happens anyway.
  DCHECK(!done_) << "Job sending done notification twice";
  if (done_)
    return;
  done_ = true;

  // Unless there was an error, we should have at least tried to handle
  // the response before getting here.
  DCHECK(has_handled_response_ || !status.is_success());

  // As with NotifyReadComplete, we need to take care to notice if we were
  // destroyed during a delegate callback.
  if (request_) {
    request_->set_is_pending(false);
    // With async IO, it's quite possible to have a few outstanding
    // requests.  We could receive a request to Cancel, followed shortly
    // by a successful IO.  For tracking the status(), once there is
    // an error, we do not change the status back to success.  To
    // enforce this, only set the status if the job is so far
    // successful.
    if (request_->status().is_success()) {
      if (status.status() == URLRequestStatus::FAILED) {
        request_->net_log().AddEventWithNetErrorCode(NetLog::TYPE_FAILED,
                                                     status.error());
      }
      request_->set_status(status);
    }
  }

  // Complete this notification later.  This prevents us from re-entering the
  // delegate if we're done because of a synchronous call.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestJob::CompleteNotifyDone,
                 weak_factory_.GetWeakPtr()));
}
492
// Posted by NotifyDone. Delivers the error to the delegate via whichever
// callback matches how far the request progressed.
void URLRequestJob::CompleteNotifyDone() {
  // Check if we should notify the delegate that we're done because of an error.
  if (request_ &&
      !request_->status().is_success() &&
      request_->has_delegate()) {
    // We report the error differently depending on whether we've called
    // OnResponseStarted yet.
    if (has_handled_response_) {
      // We signal the error by calling OnReadComplete with a bytes_read of -1.
      request_->NotifyReadCompleted(-1);
    } else {
      has_handled_response_ = true;
      request_->NotifyResponseStarted();
    }
  }
}
509
NotifyCanceled()510 void URLRequestJob::NotifyCanceled() {
511 if (!done_) {
512 NotifyDone(URLRequestStatus(URLRequestStatus::CANCELED, ERR_ABORTED));
513 }
514 }
515
// Asks the request to restart this job from scratch, unless the request has
// already been cancelled. Must happen before any response was handled.
void URLRequestJob::NotifyRestartRequired() {
  DCHECK(!has_handled_response_);
  if (GetStatus().status() != URLRequestStatus::CANCELED)
    request_->Restart();
}
521
// Forwards delegate-call bookkeeping to the request (for blocked-on-delegate
// load-state tracking).
void URLRequestJob::OnCallToDelegate() {
  request_->OnCallToDelegate();
}
525
// Counterpart of OnCallToDelegate().
void URLRequestJob::OnCallToDelegateComplete() {
  request_->OnCallToDelegateComplete();
}
529
// Default raw read: immediate EOF. Data-producing jobs override this.
bool URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size,
                                int *bytes_read) {
  DCHECK(bytes_read);
  *bytes_read = 0;
  return true;
}
536
// Default no-op hook invoked once the (post-filter) stream reaches EOF.
void URLRequestJob::DoneReading() {
  // Do nothing.
}
540
// Default no-op hook invoked when a redirect response body is being skipped.
void URLRequestJob::DoneReadingRedirectResponse() {
}
543
// Hands |bytes_read| freshly read raw bytes to the filter's input buffer.
void URLRequestJob::FilteredDataRead(int bytes_read) {
  DCHECK(filter_);
  filter_->FlushStreamBuffer(bytes_read);
}
548
// Pumps raw data through the filter into the caller's buffer saved by Read().
// Returns true with *bytes_read > 0 for data, true with 0 for EOF, and false
// for error or pending IO. Loops because a filter may need multiple raw reads
// before it can emit any output (e.g. an incomplete gzip header).
bool URLRequestJob::ReadFilteredData(int* bytes_read) {
  DCHECK(filter_);
  DCHECK(filtered_read_buffer_);
  DCHECK_GT(filtered_read_buffer_len_, 0);
  DCHECK_LT(filtered_read_buffer_len_, 1000000);  // Sanity check.
  DCHECK(!raw_read_buffer_);

  *bytes_read = 0;
  bool rv = false;

  for (;;) {
    if (is_done())
      return true;

    if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
      // We don't have any raw data to work with, so read from the transaction.
      int filtered_data_read;
      if (ReadRawDataForFilter(&filtered_data_read)) {
        if (filtered_data_read > 0) {
          // Give data to filter.
          filter_->FlushStreamBuffer(filtered_data_read);
        } else {
          return true;  // EOF.
        }
      } else {
        return false;  // IO Pending (or error).
      }
    }

    if ((filter_->stream_data_len() || filter_needs_more_output_space_) &&
        !is_done()) {
      // Get filtered data.
      int filtered_data_len = filtered_read_buffer_len_;
      int output_buffer_size = filtered_data_len;
      Filter::FilterStatus status =
          filter_->ReadData(filtered_read_buffer_->data(), &filtered_data_len);

      if (filter_needs_more_output_space_ && !filtered_data_len) {
        // filter_needs_more_output_space_ was mistaken... there are no more
        // bytes and we should have at least tried to fill up the filter's input
        // buffer. Correct the state, and try again.
        filter_needs_more_output_space_ = false;
        continue;
      }
      // A completely full output buffer suggests the filter still has more to
      // emit on the next call.
      filter_needs_more_output_space_ =
          (filtered_data_len == output_buffer_size);

      switch (status) {
        case Filter::FILTER_DONE: {
          filter_needs_more_output_space_ = false;
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
          break;
        }
        case Filter::FILTER_NEED_MORE_DATA: {
          // We have finished filtering all data currently in the buffer.
          // There might be some space left in the output buffer. One can
          // consider reading more data from the stream to feed the filter
          // and filling up the output buffer. This leads to more complicated
          // buffer management and data notification mechanisms.
          // We can revisit this issue if there is a real perf need.
          if (filtered_data_len > 0) {
            *bytes_read = filtered_data_len;
            postfilter_bytes_read_ += filtered_data_len;
            rv = true;
          } else {
            // Read again since we haven't received enough data yet (e.g., we
            // may not have a complete gzip header yet).
            continue;
          }
          break;
        }
        case Filter::FILTER_OK: {
          *bytes_read = filtered_data_len;
          postfilter_bytes_read_ += filtered_data_len;
          rv = true;
          break;
        }
        case Filter::FILTER_ERROR: {
          DVLOG(1) << __FUNCTION__ << "() "
                   << "\"" << (request_ ? request_->url().spec() : "???")
                   << "\"" << " Filter Error";
          filter_needs_more_output_space_ = false;
          NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
                                      ERR_CONTENT_DECODING_FAILED));
          rv = false;
          break;
        }
        default: {
          NOTREACHED();
          filter_needs_more_output_space_ = false;
          rv = false;
          break;
        }
      }

      // If logging all bytes is enabled, log the filtered bytes read.
      if (rv && request() && request()->net_log().IsLoggingBytes() &&
          filtered_data_len > 0) {
        request()->net_log().AddByteTransferEvent(
            NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ,
            filtered_data_len, filtered_read_buffer_->data());
      }
    } else {
      // we are done, or there is no data left.
      rv = true;
    }
    break;
  }

  if (rv) {
    // When we successfully finished a read, we no longer need to save the
    // caller's buffers. Release our reference.
    filtered_read_buffer_ = NULL;
    filtered_read_buffer_len_ = 0;
  }
  return rv;
}
668
// Drops the filter chain (if any), freeing its buffers.
void URLRequestJob::DestroyFilters() {
  filter_.reset();
}
672
GetStatus()673 const URLRequestStatus URLRequestJob::GetStatus() {
674 if (request_)
675 return request_->status();
676 // If the request is gone, we must be cancelled.
677 return URLRequestStatus(URLRequestStatus::CANCELED,
678 ERR_ABORTED);
679 }
680
// Propagates |status| onto the request; no-op once the request is gone.
void URLRequestJob::SetStatus(const URLRequestStatus &status) {
  if (request_)
    request_->set_status(status);
}
685
// Records the proxy endpoint directly on the request. Unlike most setters
// here, this assumes request_ is non-NULL (no guard).
void URLRequestJob::SetProxyServer(const HostPortPair& proxy_server) {
  request_->proxy_server_ = proxy_server;
}
689
// Reads raw bytes into the filter's own stream buffer. Returns false when no
// read was issued (filter already has buffered data, or job is done) or when
// the issued read is pending/failed.
bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
  bool rv = false;

  DCHECK(bytes_read);
  DCHECK(filter_.get());

  *bytes_read = 0;

  // Get more pre-filtered data if needed.
  // TODO(mbelshe): is it possible that the filter needs *MORE* data
  //    when there is some data already in the buffer?
  if (!filter_->stream_data_len() && !is_done()) {
    IOBuffer* stream_buffer = filter_->stream_buffer();
    int stream_buffer_size = filter_->stream_buffer_size();
    rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
  }
  return rv;
}
708
// Wraps ReadRawData so that byte accounting (OnRawReadComplete) runs for
// synchronous completions here, and for asynchronous ones via the saved
// |raw_read_buffer_| when NotifyReadComplete fires.
bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf, int buf_size,
                                      int* bytes_read) {
  DCHECK(!request_->status().is_io_pending());
  DCHECK(raw_read_buffer_.get() == NULL);

  // Keep a pointer to the read buffer, so we have access to it in the
  // OnRawReadComplete() callback in the event that the read completes
  // asynchronously.
  raw_read_buffer_ = buf;
  bool rv = ReadRawData(buf, buf_size, bytes_read);

  if (!request_->status().is_io_pending()) {
    // If the read completes synchronously, either success or failure,
    // invoke the OnRawReadComplete callback so we can account for the
    // completed read.
    OnRawReadComplete(*bytes_read);
  }
  return rv;
}
728
// Asks the request to redirect to |location|; turns a non-OK result into a
// failed-done notification. May delete |this| (see FollowDeferredRedirect).
void URLRequestJob::FollowRedirect(const GURL& location, int http_status_code) {
  int rv = request_->Redirect(location, http_status_code);
  if (rv != OK)
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
}
734
// Accounts for a completed raw read (sync or async) and releases the buffer
// reference taken in ReadRawDataHelper.
void URLRequestJob::OnRawReadComplete(int bytes_read) {
  DCHECK(raw_read_buffer_.get());
  // If |filter_| is non-NULL, bytes will be logged after it is applied
  // instead.
  if (!filter_.get() && request() && request()->net_log().IsLoggingBytes() &&
      bytes_read > 0) {
    request()->net_log().AddByteTransferEvent(
        NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ,
        bytes_read, raw_read_buffer_->data());
  }

  if (bytes_read > 0) {
    RecordBytesRead(bytes_read);
  }
  raw_read_buffer_ = NULL;
}
750
// Updates pre-/post-filter byte counters and informs the network delegate of
// the raw bytes received.
void URLRequestJob::RecordBytesRead(int bytes_read) {
  filter_input_byte_count_ += bytes_read;
  prefilter_bytes_read_ += bytes_read;
  // Without a filter, raw bytes are also the delivered (post-filter) bytes.
  if (!filter_.get())
    postfilter_bytes_read_ += bytes_read;
  DVLOG(2) << __FUNCTION__ << "() "
           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
           << " pre bytes read = " << bytes_read
           << " pre total = " << prefilter_bytes_read_
           << " post total = " << postfilter_bytes_read_;
  UpdatePacketReadTimes();  // Facilitate stats recording if it is active.
  if (network_delegate_)
    network_delegate_->NotifyRawBytesRead(*request_, bytes_read);
}
765
FilterHasData()766 bool URLRequestJob::FilterHasData() {
767 return filter_.get() && filter_->stream_data_len();
768 }
769
// Default no-op hook for per-packet timing stats; overridden by jobs that
// collect them.
void URLRequestJob::UpdatePacketReadTimes() {
}
772
773 } // namespace net
774