• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "chrome/browser/safe_browsing/protocol_manager.h"
6 
7 #include "base/environment.h"
8 #include "base/logging.h"
9 #include "base/memory/scoped_vector.h"
10 #include "base/metrics/histogram.h"
11 #include "base/rand_util.h"
12 #include "base/stl_util.h"
13 #include "base/strings/string_util.h"
14 #include "base/strings/stringprintf.h"
15 #include "base/timer/timer.h"
16 #include "chrome/browser/safe_browsing/protocol_parser.h"
17 #include "chrome/common/chrome_version_info.h"
18 #include "chrome/common/env_vars.h"
19 #include "google_apis/google_api_keys.h"
20 #include "net/base/escape.h"
21 #include "net/base/load_flags.h"
22 #include "net/base/net_errors.h"
23 #include "net/url_request/url_fetcher.h"
24 #include "net/url_request/url_request_context_getter.h"
25 #include "net/url_request/url_request_status.h"
26 
27 #if defined(OS_ANDROID)
28 #include "net/base/network_change_notifier.h"
29 #endif
30 
31 using base::Time;
32 using base::TimeDelta;
33 
namespace {

// UpdateResult indicates what happened with the primary and/or backup update
// requests. The ordering of the values must stay the same for UMA consistency,
// and is also ordered in this way to match ProtocolManager::BackupUpdateReason.
enum UpdateResult {
  UPDATE_RESULT_FAIL,
  UPDATE_RESULT_SUCCESS,
  UPDATE_RESULT_BACKUP_CONNECT_FAIL,
  UPDATE_RESULT_BACKUP_CONNECT_SUCCESS,
  UPDATE_RESULT_BACKUP_HTTP_FAIL,
  UPDATE_RESULT_BACKUP_HTTP_SUCCESS,
  UPDATE_RESULT_BACKUP_NETWORK_FAIL,
  UPDATE_RESULT_BACKUP_NETWORK_SUCCESS,
  UPDATE_RESULT_MAX,
  // First backup value: backup results are computed as
  // UPDATE_RESULT_BACKUP_START + 2 * backup_reason + success.
  UPDATE_RESULT_BACKUP_START = UPDATE_RESULT_BACKUP_CONNECT_FAIL,
};

// Records |result| in the SB2.UpdateResult UMA histogram.
void RecordUpdateResult(UpdateResult result) {
  DCHECK(result >= 0 && result < UPDATE_RESULT_MAX);
  UMA_HISTOGRAM_ENUMERATION("SB2.UpdateResult", result, UPDATE_RESULT_MAX);
}

}  // namespace
58 
// Minimum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMin = 60;

// Maximum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMax = 300;

// The maximum time, in seconds, to wait for a response to an update request.
// After this the request is cancelled and a backup update may be attempted.
static const int kSbMaxUpdateWaitSec = 30;

// Maximum back off multiplier (caps the exponential back off growth).
static const size_t kSbMaxBackOff = 8;
70 
// The default SBProtocolManagerFactory.
class SBProtocolManagerFactoryImpl : public SBProtocolManagerFactory {
 public:
  SBProtocolManagerFactoryImpl() { }
  virtual ~SBProtocolManagerFactoryImpl() { }
  // Returns a new SafeBrowsingProtocolManager; the caller takes ownership.
  virtual SafeBrowsingProtocolManager* CreateProtocolManager(
      SafeBrowsingProtocolManagerDelegate* delegate,
      net::URLRequestContextGetter* request_context_getter,
      const SafeBrowsingProtocolConfig& config) OVERRIDE {
    return new SafeBrowsingProtocolManager(
        delegate, request_context_getter, config);
  }
 private:
  DISALLOW_COPY_AND_ASSIGN(SBProtocolManagerFactoryImpl);
};
86 
87 // SafeBrowsingProtocolManager implementation ----------------------------------
88 
// static
// Lazily initialized by Create() when no factory has been registered.
SBProtocolManagerFactory* SafeBrowsingProtocolManager::factory_ = NULL;
91 
// static
SafeBrowsingProtocolManager* SafeBrowsingProtocolManager::Create(
    SafeBrowsingProtocolManagerDelegate* delegate,
    net::URLRequestContextGetter* request_context_getter,
    const SafeBrowsingProtocolConfig& config) {
  // Fall back to the default factory if none has been installed.
  if (!factory_)
    factory_ = new SBProtocolManagerFactoryImpl();
  return factory_->CreateProtocolManager(
      delegate, request_context_getter, config);
}
102 
SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
    SafeBrowsingProtocolManagerDelegate* delegate,
    net::URLRequestContextGetter* request_context_getter,
    const SafeBrowsingProtocolConfig& config)
    : delegate_(delegate),
      request_type_(NO_REQUEST),
      update_error_count_(0),
      gethash_error_count_(0),
      update_back_off_mult_(1),
      gethash_back_off_mult_(1),
      // The first update is issued at a random time within
      // [kSbTimerStartIntervalSecMin, kSbTimerStartIntervalSecMax] so clients
      // don't all hit the servers at the same moment after startup.
      next_update_interval_(base::TimeDelta::FromSeconds(
          base::RandInt(kSbTimerStartIntervalSecMin,
                        kSbTimerStartIntervalSecMax))),
      update_state_(FIRST_REQUEST),
      chunk_pending_to_write_(false),
      version_(config.version),
      update_size_(0),
      client_name_(config.client_name),
      request_context_getter_(request_context_getter),
      url_prefix_(config.url_prefix),
      backup_update_reason_(BACKUP_UPDATE_REASON_MAX),
      disable_auto_update_(config.disable_auto_update),
      url_fetcher_id_(0),
      app_in_foreground_(true) {
  DCHECK(!url_prefix_.empty());

  // One (possibly empty) backup URL prefix per backup reason; an empty prefix
  // disables the backup request for that reason.
  backup_url_prefixes_[BACKUP_UPDATE_REASON_CONNECT] =
      config.backup_connect_error_url_prefix;
  backup_url_prefixes_[BACKUP_UPDATE_REASON_HTTP] =
      config.backup_http_error_url_prefix;
  backup_url_prefixes_[BACKUP_UPDATE_REASON_NETWORK] =
      config.backup_network_error_url_prefix;

  // Set the backoff multiplier fuzz to a random value between 0 and 1.
  back_off_fuzz_ = static_cast<float>(base::RandDouble());
  if (version_.empty())
    version_ = SafeBrowsingProtocolManagerHelper::Version();
}
141 
142 // static
RecordGetHashResult(bool is_download,ResultType result_type)143 void SafeBrowsingProtocolManager::RecordGetHashResult(
144     bool is_download, ResultType result_type) {
145   if (is_download) {
146     UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResultDownload", result_type,
147                               GET_HASH_RESULT_MAX);
148   } else {
149     UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResult", result_type,
150                               GET_HASH_RESULT_MAX);
151   }
152 }
153 
// Returns true while an update is pending on |update_timer_|.
bool SafeBrowsingProtocolManager::IsUpdateScheduled() const {
  return update_timer_.IsRunning();
}
157 
SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
  // Delete in-progress SafeBrowsing requests. The map keys are URLFetchers
  // owned by this object (see GetFullHash).
  STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
                                      hash_requests_.end());
  hash_requests_.clear();
}
164 
// We can only have one update or chunk request outstanding, but there may be
// multiple GetHash requests pending since we don't want to serialize them and
// slow down the user.
void SafeBrowsingProtocolManager::GetFullHash(
    const std::vector<SBPrefix>& prefixes,
    FullHashCallback callback,
    bool is_download) {
  DCHECK(CalledOnValidThread());
  // If we are in GetHash backoff, we need to check if we're past the next
  // allowed time. If we are, we can proceed with the request. If not, we are
  // required to return empty results (i.e. treat the page as safe).
  if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
    RecordGetHashResult(is_download, GET_HASH_BACKOFF_ERROR);
    std::vector<SBFullHashResult> full_hashes;
    callback.Run(full_hashes, base::TimeDelta());
    return;
  }
  GURL gethash_url = GetHashUrl();
  net::URLFetcher* fetcher = net::URLFetcher::Create(
      url_fetcher_id_++, gethash_url, net::URLFetcher::POST, this);
  // |fetcher| is owned via |hash_requests_| until OnURLFetchComplete fires
  // (or until the destructor cleans up in-flight requests).
  hash_requests_[fetcher] = FullHashDetails(callback, is_download);

  const std::string get_hash = safe_browsing::FormatGetHash(prefixes);

  fetcher->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  fetcher->SetRequestContext(request_context_getter_.get());
  fetcher->SetUploadData("text/plain", get_hash);
  fetcher->Start();
}
194 
void SafeBrowsingProtocolManager::GetNextUpdate() {
  DCHECK(CalledOnValidThread());
  // Only one update or chunk request may be outstanding at a time.
  if (request_.get() || request_type_ != NO_REQUEST)
    return;

#if defined(OS_ANDROID)
  // On Android, only fetch updates while on WiFi; otherwise reschedule
  // without backing off.
  net::NetworkChangeNotifier::ConnectionType type =
    net::NetworkChangeNotifier::GetConnectionType();
  if (type != net::NetworkChangeNotifier::CONNECTION_WIFI) {
    ScheduleNextUpdate(false /* no back off */);
    return;
  }
#endif

  IssueUpdateRequest();
}
211 
212 // net::URLFetcherDelegate implementation ----------------------------------
213 
// All SafeBrowsing request responses are handled here.
// TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
//              chunk should retry the download and parse of that chunk (and
//              what back off / how many times to try), and if that effects the
//              update back off. For now, a failed parse of the chunk means we
//              drop it. This isn't so bad because the next UPDATE_REQUEST we
//              do will report all the chunks we have. If that chunk is still
//              required, the SafeBrowsing servers will tell us to get it again.
void SafeBrowsingProtocolManager::OnURLFetchComplete(
    const net::URLFetcher* source) {
  DCHECK(CalledOnValidThread());
  // Takes ownership of |source| so it is deleted when this method returns.
  scoped_ptr<const net::URLFetcher> fetcher;

  HashRequests::iterator it = hash_requests_.find(source);
  if (it != hash_requests_.end()) {
    // GetHash response.
    fetcher.reset(it->first);
    const FullHashDetails& details = it->second;
    std::vector<SBFullHashResult> full_hashes;
    base::TimeDelta cache_lifetime;
    if (source->GetStatus().is_success() &&
        (source->GetResponseCode() == 200 ||
         source->GetResponseCode() == 204)) {
      // For tracking our GetHash false positive (204) rate, compared to real
      // (200) responses.
      if (source->GetResponseCode() == 200)
        RecordGetHashResult(details.is_download, GET_HASH_STATUS_200);
      else
        RecordGetHashResult(details.is_download, GET_HASH_STATUS_204);

      // Any successful response resets the GetHash backoff state.
      gethash_error_count_ = 0;
      gethash_back_off_mult_ = 1;
      std::string data;
      source->GetResponseAsString(&data);
      if (!safe_browsing::ParseGetHash(
              data.data(), data.length(), &cache_lifetime, &full_hashes)) {
        full_hashes.clear();
        RecordGetHashResult(details.is_download, GET_HASH_PARSE_ERROR);
        // TODO(cbentzel): Should cache_lifetime be set to 0 here? (See
        // http://crbug.com/360232.)
      }
    } else {
      // Network or HTTP failure: enter/extend GetHash backoff.
      HandleGetHashError(Time::Now());
      if (source->GetStatus().status() == net::URLRequestStatus::FAILED) {
        RecordGetHashResult(details.is_download, GET_HASH_NETWORK_ERROR);
        VLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
                << " failed with error: " << source->GetStatus().error();
      } else {
        RecordGetHashResult(details.is_download, GET_HASH_HTTP_ERROR);
        VLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
                << " failed with error: " << source->GetResponseCode();
      }
    }

    // Invoke the callback with full_hashes, even if there was a parse error or
    // an error response code (in which case full_hashes will be empty). The
    // caller can't be blocked indefinitely.
    details.callback.Run(full_hashes, cache_lifetime);

    hash_requests_.erase(it);
  } else {
    // Update or chunk response.
    fetcher.reset(request_.release());

    if (request_type_ == UPDATE_REQUEST ||
        request_type_ == BACKUP_UPDATE_REQUEST) {
      if (!fetcher.get()) {
        // We've timed out waiting for an update response, so we've cancelled
        // the update request and scheduled a new one. Ignore this response.
        return;
      }

      // Cancel the update response timeout now that we have the response.
      timeout_timer_.Stop();
    }

    net::URLRequestStatus status = source->GetStatus();
    if (status.is_success() && source->GetResponseCode() == 200) {
      // We have data from the SafeBrowsing service.
      std::string data;
      source->GetResponseAsString(&data);

      // TODO(shess): Cleanup the flow of this code so that |parsed_ok| can be
      // removed or omitted.
      const bool parsed_ok = HandleServiceResponse(
          source->GetURL(), data.data(), data.length());
      if (!parsed_ok) {
        VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
                << " failed parse.";
        chunk_request_urls_.clear();
        if (request_type_ == UPDATE_REQUEST &&
            IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_HTTP)) {
          return;
        }
        // Note: UpdateFinished resets |request_type_| to NO_REQUEST, which is
        // why the switch below has a NO_REQUEST case.
        UpdateFinished(false);
      }

      switch (request_type_) {
        case CHUNK_REQUEST:
          if (parsed_ok) {
            chunk_request_urls_.pop_front();
            if (chunk_request_urls_.empty() && !chunk_pending_to_write_)
              UpdateFinished(true);
          }
          break;
        case UPDATE_REQUEST:
        case BACKUP_UPDATE_REQUEST:
          if (chunk_request_urls_.empty() && parsed_ok) {
            // We are up to date since the servers gave us nothing new, so we
            // are done with this update cycle.
            UpdateFinished(true);
          }
          break;
        case NO_REQUEST:
          // This can happen if HandleServiceResponse fails above.
          break;
        default:
          NOTREACHED();
          break;
      }
    } else {
      if (status.status() == net::URLRequestStatus::FAILED) {
        VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
                << " failed with error: " << source->GetStatus().error();
      } else {
        VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
                << " failed with error: " << source->GetResponseCode();
      }
      if (request_type_ == CHUNK_REQUEST) {
        // The SafeBrowsing service error, or very bad response code: back off.
        chunk_request_urls_.clear();
      } else if (request_type_ == UPDATE_REQUEST) {
        // Pick the backup flavor that matches the failure mode and try it
        // before declaring this update cycle failed.
        BackupUpdateReason backup_update_reason = BACKUP_UPDATE_REASON_MAX;
        if (status.is_success()) {
          backup_update_reason = BACKUP_UPDATE_REASON_HTTP;
        } else {
          switch (status.error()) {
            case net::ERR_INTERNET_DISCONNECTED:
            case net::ERR_NETWORK_CHANGED:
              backup_update_reason = BACKUP_UPDATE_REASON_NETWORK;
              break;
            default:
              backup_update_reason = BACKUP_UPDATE_REASON_CONNECT;
              break;
          }
        }
        if (backup_update_reason != BACKUP_UPDATE_REASON_MAX &&
            IssueBackupUpdateRequest(backup_update_reason)) {
          return;
        }
      }
      UpdateFinished(false);
    }
  }

  // Get the next chunk if available.
  IssueChunkRequest();
}
372 
// Parses the body of a completed update/backup-update or chunk fetch and
// dispatches the results to the delegate. Returns false on a parse failure or
// an unexpected |request_type_|. |url| is currently unused by the body.
bool SafeBrowsingProtocolManager::HandleServiceResponse(
    const GURL& url, const char* data, size_t length) {
  DCHECK(CalledOnValidThread());

  switch (request_type_) {
    case UPDATE_REQUEST:
    case BACKUP_UPDATE_REQUEST: {
      size_t next_update_sec = 0;
      bool reset = false;
      scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes(
          new std::vector<SBChunkDelete>);
      std::vector<ChunkUrl> chunk_urls;
      if (!safe_browsing::ParseUpdate(data, length, &next_update_sec, &reset,
                                      chunk_deletes.get(), &chunk_urls)) {
        return false;
      }

      base::TimeDelta next_update_interval =
          base::TimeDelta::FromSeconds(next_update_sec);
      last_update_ = Time::Now();

      // Advance the FIRST -> SECOND -> NORMAL request state machine.
      if (update_state_ == FIRST_REQUEST)
        update_state_ = SECOND_REQUEST;
      else if (update_state_ == SECOND_REQUEST)
        update_state_ = NORMAL_REQUEST;

      // New time for the next update.
      if (next_update_interval > base::TimeDelta()) {
        next_update_interval_ = next_update_interval;
      } else if (update_state_ == SECOND_REQUEST) {
        // Server gave no interval on the second request: retry soon, with a
        // small random spread.
        next_update_interval_ = base::TimeDelta::FromSeconds(
            base::RandInt(15, 45));
      }

      // New chunks to download.
      if (!chunk_urls.empty()) {
        UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size());
        for (size_t i = 0; i < chunk_urls.size(); ++i)
          chunk_request_urls_.push_back(chunk_urls[i]);
      }

      // Handle the case where the SafeBrowsing service tells us to dump our
      // database.
      if (reset) {
        delegate_->ResetDatabase();
        return true;
      }

      // Chunks to delete from our storage.
      if (!chunk_deletes->empty())
        delegate_->DeleteChunks(chunk_deletes.Pass());

      break;
    }
    case CHUNK_REQUEST: {
      UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
                          base::Time::Now() - chunk_request_start_);

      const ChunkUrl chunk_url = chunk_request_urls_.front();
      scoped_ptr<ScopedVector<SBChunkData> >
          chunks(new ScopedVector<SBChunkData>);
      UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length);
      update_size_ += length;
      if (!safe_browsing::ParseChunk(data, length, chunks.get()))
        return false;

      // Chunks to add to storage.  Pass ownership of |chunks|.
      if (!chunks->empty()) {
        // OnAddChunksComplete clears this flag once the write lands.
        chunk_pending_to_write_ = true;
        delegate_->AddChunks(
            chunk_url.list_name, chunks.Pass(),
            base::Bind(&SafeBrowsingProtocolManager::OnAddChunksComplete,
                       base::Unretained(this)));
      }

      break;
    }

    default:
      return false;
  }

  return true;
}
457 
Initialize()458 void SafeBrowsingProtocolManager::Initialize() {
459   DCHECK(CalledOnValidThread());
460   // Don't want to hit the safe browsing servers on build/chrome bots.
461   scoped_ptr<base::Environment> env(base::Environment::Create());
462   if (env->HasVar(env_vars::kHeadless))
463     return;
464   ScheduleNextUpdate(false /* no back off */);
465 }
466 
ScheduleNextUpdate(bool back_off)467 void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
468   DCHECK(CalledOnValidThread());
469   if (disable_auto_update_) {
470     // Unschedule any current timer.
471     update_timer_.Stop();
472     return;
473   }
474   // Reschedule with the new update.
475   base::TimeDelta next_update_interval = GetNextUpdateInterval(back_off);
476   ForceScheduleNextUpdate(next_update_interval);
477 }
478 
// Arms |update_timer_| to fire GetNextUpdate after |interval|, replacing any
// previously scheduled update.
void SafeBrowsingProtocolManager::ForceScheduleNextUpdate(
    base::TimeDelta interval) {
  DCHECK(CalledOnValidThread());
  DCHECK(interval >= base::TimeDelta());
  // Unschedule any current timer.
  update_timer_.Stop();
  update_timer_.Start(FROM_HERE, interval, this,
                      &SafeBrowsingProtocolManager::GetNextUpdate);
}
488 
489 // According to section 5 of the SafeBrowsing protocol specification, we must
490 // back off after a certain number of errors. We only change |next_update_sec_|
491 // when we receive a response from the SafeBrowsing service.
GetNextUpdateInterval(bool back_off)492 base::TimeDelta SafeBrowsingProtocolManager::GetNextUpdateInterval(
493     bool back_off) {
494   DCHECK(CalledOnValidThread());
495   DCHECK(next_update_interval_ > base::TimeDelta());
496   base::TimeDelta next = next_update_interval_;
497   if (back_off) {
498     next = GetNextBackOffInterval(&update_error_count_, &update_back_off_mult_);
499   } else {
500     // Successful response means error reset.
501     update_error_count_ = 0;
502     update_back_off_mult_ = 1;
503   }
504   return next;
505 }
506 
GetNextBackOffInterval(size_t * error_count,size_t * multiplier) const507 base::TimeDelta SafeBrowsingProtocolManager::GetNextBackOffInterval(
508     size_t* error_count, size_t* multiplier) const {
509   DCHECK(CalledOnValidThread());
510   DCHECK(multiplier && error_count);
511   (*error_count)++;
512   if (*error_count > 1 && *error_count < 6) {
513     base::TimeDelta next = base::TimeDelta::FromMinutes(
514         *multiplier * (1 + back_off_fuzz_) * 30);
515     *multiplier *= 2;
516     if (*multiplier > kSbMaxBackOff)
517       *multiplier = kSbMaxBackOff;
518     return next;
519   }
520   if (*error_count >= 6)
521     return base::TimeDelta::FromHours(8);
522   return base::TimeDelta::FromMinutes(1);
523 }
524 
// This request requires getting a list of all the chunks for each list from the
// database asynchronously. The request will be issued when we're called back in
// OnGetChunksComplete.
// TODO(paulg): We should get this at start up and maintain a ChunkRange cache
//              to avoid hitting the database with each update request. On the
//              otherhand, this request will only occur ~20-30 minutes so there
//              isn't that much overhead. Measure!
void SafeBrowsingProtocolManager::IssueUpdateRequest() {
  DCHECK(CalledOnValidThread());
  request_type_ = UPDATE_REQUEST;
  delegate_->UpdateStarted();
  delegate_->GetChunks(
      base::Bind(&SafeBrowsingProtocolManager::OnGetChunksComplete,
                 base::Unretained(this)));
}
540 
// The backup request can run immediately since the chunks have already been
// retrieved from the DB.
// Returns false (without side effects) when no backup URL prefix is
// configured for |backup_update_reason|.
bool SafeBrowsingProtocolManager::IssueBackupUpdateRequest(
    BackupUpdateReason backup_update_reason) {
  DCHECK(CalledOnValidThread());
  DCHECK_EQ(request_type_, UPDATE_REQUEST);
  DCHECK(backup_update_reason >= 0 &&
         backup_update_reason < BACKUP_UPDATE_REASON_MAX);
  if (backup_url_prefixes_[backup_update_reason].empty())
    return false;
  request_type_ = BACKUP_UPDATE_REQUEST;
  backup_update_reason_ = backup_update_reason;

  // Reuses |update_list_data_| that was formatted for the primary request.
  GURL backup_update_url = BackupUpdateUrl(backup_update_reason);
  request_.reset(net::URLFetcher::Create(
      url_fetcher_id_++, backup_update_url, net::URLFetcher::POST, this));
  request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  request_->SetRequestContext(request_context_getter_.get());
  request_->SetUploadData("text/plain", update_list_data_);
  request_->Start();

  // Begin the update request timeout.
  timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
                       this,
                       &SafeBrowsingProtocolManager::UpdateResponseTimeout);

  return true;
}
569 
// Starts fetching the chunk at the front of |chunk_request_urls_|, if any.
void SafeBrowsingProtocolManager::IssueChunkRequest() {
  DCHECK(CalledOnValidThread());
  // We are only allowed to have one request outstanding at any time.  Also,
  // don't get the next url until the previous one has been written to disk so
  // that we don't use too much memory.
  if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
    return;

  ChunkUrl next_chunk = chunk_request_urls_.front();
  DCHECK(!next_chunk.url.empty());
  GURL chunk_url = NextChunkUrl(next_chunk.url);
  request_type_ = CHUNK_REQUEST;
  request_.reset(net::URLFetcher::Create(
      url_fetcher_id_++, chunk_url, net::URLFetcher::GET, this));
  request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  request_->SetRequestContext(request_context_getter_.get());
  // Timestamp used for the SB2.ChunkRequest latency histogram.
  chunk_request_start_ = base::Time::Now();
  request_->Start();
}
589 
// Callback from the delegate with the stored chunk ranges; formats them into
// |update_list_data_| and issues the actual update request.
void SafeBrowsingProtocolManager::OnGetChunksComplete(
    const std::vector<SBListChunkRanges>& lists, bool database_error) {
  DCHECK(CalledOnValidThread());
  DCHECK_EQ(request_type_, UPDATE_REQUEST);
  DCHECK(update_list_data_.empty());
  if (database_error) {
    // The update was not successful, but don't back off.
    UpdateFinished(false, false);
    return;
  }

  // Format our stored chunks:
  bool found_malware = false;
  bool found_phishing = false;
  for (size_t i = 0; i < lists.size(); ++i) {
    update_list_data_.append(safe_browsing::FormatList(lists[i]));
    if (lists[i].name == safe_browsing_util::kPhishingList)
      found_phishing = true;

    if (lists[i].name == safe_browsing_util::kMalwareList)
      found_malware = true;
  }

  // If we have an empty database, let the server know we want data for these
  // lists.
  // TODO(shess): These cases never happen because the database fills in the
  // lists in GetChunks().  Refactor the unit tests so that this code can be
  // removed.
  if (!found_phishing) {
    update_list_data_.append(safe_browsing::FormatList(
        SBListChunkRanges(safe_browsing_util::kPhishingList)));
  }
  if (!found_malware) {
    update_list_data_.append(safe_browsing::FormatList(
        SBListChunkRanges(safe_browsing_util::kMalwareList)));
  }

  // Large requests are (probably) a sign of database corruption.
  // Record stats to inform decisions about whether to automate
  // deletion of such databases.  http://crbug.com/120219
  UMA_HISTOGRAM_COUNTS("SB2.UpdateRequestSize", update_list_data_.size());

  GURL update_url = UpdateUrl();
  request_.reset(net::URLFetcher::Create(
      url_fetcher_id_++, update_url, net::URLFetcher::POST, this));
  request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
  request_->SetRequestContext(request_context_getter_.get());
  request_->SetUploadData("text/plain", update_list_data_);
  request_->Start();

  // Begin the update request timeout.
  timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
                       this,
                       &SafeBrowsingProtocolManager::UpdateResponseTimeout);
}
645 
// If we haven't heard back from the server with an update response, this method
// will run. Close the current update session and schedule another update.
void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
  DCHECK(CalledOnValidThread());
  DCHECK(request_type_ == UPDATE_REQUEST ||
         request_type_ == BACKUP_UPDATE_REQUEST);
  // Releasing |request_| cancels the fetch; OnURLFetchComplete detects the
  // null request and ignores any late response.
  request_.reset();
  if (request_type_ == UPDATE_REQUEST &&
      IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_CONNECT)) {
    return;
  }
  UpdateFinished(false);
}
659 
OnAddChunksComplete()660 void SafeBrowsingProtocolManager::OnAddChunksComplete() {
661   DCHECK(CalledOnValidThread());
662   chunk_pending_to_write_ = false;
663 
664   if (chunk_request_urls_.empty()) {
665     UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_);
666     UpdateFinished(true);
667   } else {
668     IssueChunkRequest();
669   }
670 }
671 
HandleGetHashError(const Time & now)672 void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) {
673   DCHECK(CalledOnValidThread());
674   base::TimeDelta next = GetNextBackOffInterval(
675       &gethash_error_count_, &gethash_back_off_mult_);
676   next_gethash_time_ = now + next;
677 }
678 
// Convenience overload: a failed update backs off, a successful one does not.
void SafeBrowsingProtocolManager::UpdateFinished(bool success) {
  UpdateFinished(success, !success);
}
682 
// Ends the current update cycle: records UMA, resets per-cycle state, tells
// the delegate, and schedules the next update (with back off if requested).
void SafeBrowsingProtocolManager::UpdateFinished(bool success, bool back_off) {
  DCHECK(CalledOnValidThread());
#if defined(OS_ANDROID)
  if (app_in_foreground_)
    UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeForeground", update_size_);
  else
    UMA_HISTOGRAM_COUNTS("SB2.UpdateSizeBackground", update_size_);
#endif
  UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
  update_size_ = 0;
  // A failed chunk fetch is still recorded as an update success for UMA.
  bool update_success = success || request_type_ == CHUNK_REQUEST;
  if (backup_update_reason_ == BACKUP_UPDATE_REASON_MAX) {
    RecordUpdateResult(
        update_success ? UPDATE_RESULT_SUCCESS : UPDATE_RESULT_FAIL);
  } else {
    // Relies on UpdateResult's backup values being ordered to match
    // BackupUpdateReason, two (fail/success) per reason.
    UpdateResult update_result = static_cast<UpdateResult>(
          UPDATE_RESULT_BACKUP_START +
          (static_cast<int>(backup_update_reason_) * 2) +
          update_success);
    RecordUpdateResult(update_result);
  }
  backup_update_reason_ = BACKUP_UPDATE_REASON_MAX;
  request_type_ = NO_REQUEST;
  update_list_data_.clear();
  delegate_->UpdateFinished(success);
  ScheduleNextUpdate(back_off);
}
710 
UpdateUrl() const711 GURL SafeBrowsingProtocolManager::UpdateUrl() const {
712   std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
713       url_prefix_, "downloads", client_name_, version_, additional_query_);
714   return GURL(url);
715 }
716 
BackupUpdateUrl(BackupUpdateReason backup_update_reason) const717 GURL SafeBrowsingProtocolManager::BackupUpdateUrl(
718     BackupUpdateReason backup_update_reason) const {
719   DCHECK(backup_update_reason >= 0 &&
720          backup_update_reason < BACKUP_UPDATE_REASON_MAX);
721   DCHECK(!backup_url_prefixes_[backup_update_reason].empty());
722   std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
723       backup_url_prefixes_[backup_update_reason], "downloads", client_name_,
724       version_, additional_query_);
725   return GURL(url);
726 }
727 
GetHashUrl() const728 GURL SafeBrowsingProtocolManager::GetHashUrl() const {
729   std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
730       url_prefix_, "gethash", client_name_, version_, additional_query_);
731   return GURL(url);
732 }
733 
NextChunkUrl(const std::string & url) const734 GURL SafeBrowsingProtocolManager::NextChunkUrl(const std::string& url) const {
735   DCHECK(CalledOnValidThread());
736   std::string next_url;
737   if (!StartsWithASCII(url, "http://", false) &&
738       !StartsWithASCII(url, "https://", false)) {
739     // Use https if we updated via https, otherwise http (useful for testing).
740     if (StartsWithASCII(url_prefix_, "https://", false))
741       next_url.append("https://");
742     else
743       next_url.append("http://");
744     next_url.append(url);
745   } else {
746     next_url = url;
747   }
748   if (!additional_query_.empty()) {
749     if (next_url.find("?") != std::string::npos) {
750       next_url.append("&");
751     } else {
752       next_url.append("?");
753     }
754     next_url.append(additional_query_);
755   }
756   return GURL(next_url);
757 }
758 
// Default-constructs an empty request record (null callback, non-download).
SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails()
    : callback(),
      is_download(false) {
}
763 
// Records the completion callback and request flavor for an in-flight
// GetHash fetch.
SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails(
    FullHashCallback callback, bool is_download)
    : callback(callback),
      is_download(is_download) {
}
769 
SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() {
}
772 
SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() {
}
775