1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "chrome/browser/sync/syncable/syncable.h"
6 
7 #include "build/build_config.h"
8 
9 #include <sys/stat.h>
10 #if defined(OS_POSIX)
11 #include <sys/time.h>
12 #endif
13 #include <sys/types.h>
14 #include <time.h>
15 #if defined(OS_MACOSX)
16 #include <CoreFoundation/CoreFoundation.h>
17 #elif defined(OS_WIN)
18 #include <shlwapi.h>  // for PathMatchSpec
19 #endif
20 
21 #include <algorithm>
22 #include <cstring>
23 #include <functional>
24 #include <iomanip>
25 #include <iterator>
26 #include <limits>
27 #include <set>
28 #include <string>
29 
30 #include "base/hash_tables.h"
31 #include "base/file_util.h"
32 #include "base/logging.h"
33 #include "base/memory/scoped_ptr.h"
34 #include "base/perftimer.h"
35 #include "base/string_number_conversions.h"
36 #include "base/string_util.h"
37 #include "base/stl_util-inl.h"
38 #include "base/time.h"
39 #include "base/values.h"
40 #include "chrome/browser/sync/engine/syncer.h"
41 #include "chrome/browser/sync/engine/syncer_util.h"
42 #include "chrome/browser/sync/protocol/proto_value_conversions.h"
43 #include "chrome/browser/sync/protocol/service_constants.h"
44 #include "chrome/browser/sync/syncable/directory_backing_store.h"
45 #include "chrome/browser/sync/syncable/directory_change_listener.h"
46 #include "chrome/browser/sync/syncable/directory_manager.h"
47 #include "chrome/browser/sync/syncable/model_type.h"
48 #include "chrome/browser/sync/syncable/syncable-inl.h"
49 #include "chrome/browser/sync/syncable/syncable_changes_version.h"
50 #include "chrome/browser/sync/syncable/syncable_columns.h"
51 #include "chrome/browser/sync/syncable/syncable_enum_conversions.h"
52 #include "chrome/browser/sync/util/crypto_helpers.h"
53 #include "chrome/common/deprecated/event_sys-inl.h"
54 #include "net/base/escape.h"
55 
56 namespace {
57 enum InvariantCheckLevel {
58   OFF = 0,
59   VERIFY_IN_MEMORY = 1,
60   FULL_DB_VERIFICATION = 2
61 };
62 
63 static const InvariantCheckLevel kInvariantCheckLevel = VERIFY_IN_MEMORY;
64 
65 // Max number of milliseconds to spend checking syncable entry invariants
66 static const int kInvariantCheckMaxMs = 50;
67 }  // namespace
68 
69 using browser_sync::SyncerUtil;
70 using std::string;
71 
72 
73 namespace syncable {
74 
75 int64 Now() {
76 #if defined(OS_WIN)
77   FILETIME filetime;
78   SYSTEMTIME systime;
79   GetSystemTime(&systime);
80   SystemTimeToFileTime(&systime, &filetime);
81   // MSDN recommends converting via memcpy like this.
82   LARGE_INTEGER n;
83   memcpy(&n, &filetime, sizeof(filetime));
84   return n.QuadPart;
85 #elif defined(OS_POSIX)
86   struct timeval tv;
87   gettimeofday(&tv, NULL);
88   return static_cast<int64>(tv.tv_sec);
89 #else
90 #error NEED OS SPECIFIC Now() implementation
91 #endif
92 }
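// Editorial note: the two branches return different units -- the Windows path
// yields 100-nanosecond FILETIME ticks (epoch 1601), while the POSIX path
// yields whole seconds since the Unix epoch -- so Now() values are only
// comparable within a single platform.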
93 
94 namespace {
95 
96 // A ScopedIndexUpdater temporarily removes an entry from an index,
97 // and restores it to the index when the scope exits.  This simplifies
98 // the common pattern where items need to be removed from an index
99 // before updating the field.
100 //
101 // This class is parameterized on the Indexer traits type, which
102 // must define a Comparator and a static bool ShouldInclude
103 // function for testing whether the item ought to be included
104 // in the index.
105 template<typename Indexer>
106 class ScopedIndexUpdater {
107  public:
108   ScopedIndexUpdater(const ScopedKernelLock& proof_of_lock,
109                      EntryKernel* entry,
110                      typename Index<Indexer>::Set* index)
111       : entry_(entry),
112         index_(index) {
113     // First call to ShouldInclude happens before the field is updated.
114     if (Indexer::ShouldInclude(entry_)) {
115       CHECK(index_->erase(entry_));
116     }
117   }
118 
119   ~ScopedIndexUpdater() {
120     // Second call to ShouldInclude happens after the field is updated.
121     if (Indexer::ShouldInclude(entry_)) {
122       CHECK(index_->insert(entry_).second);
123     }
124   }
125  private:
126   // The entry that was temporarily removed from the index.
127   EntryKernel* entry_;
128   // The index which we are updating.
129   typename Index<Indexer>::Set* const index_;
130 };
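// Illustrative usage (a sketch mirroring Directory::ReindexId() below):
// update an indexed field while keeping its index consistent.
//
//   {
//     ScopedIndexUpdater<IdIndexer> updater(lock, entry, kernel_->ids_index);
//     entry->put(ID, new_id);  // erased on entry, re-inserted on scope exit
//   }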
131 
132 // Helper function to add an item to the index, if it ought to be added.
133 template<typename Indexer>
134 void InitializeIndexEntry(EntryKernel* entry,
135                           typename Index<Indexer>::Set* index) {
136   if (Indexer::ShouldInclude(entry)) {
137     index->insert(entry);
138   }
139 }
140 
141 }  // namespace
142 
143 ///////////////////////////////////////////////////////////////////////////
144 // Comparator and filter functions for the indices.
145 
146 // static
147 bool ClientTagIndexer::ShouldInclude(const EntryKernel* a) {
148   return !a->ref(UNIQUE_CLIENT_TAG).empty();
149 }
150 
151 bool ParentIdAndHandleIndexer::Comparator::operator() (
152     const syncable::EntryKernel* a,
153     const syncable::EntryKernel* b) const {
154   int cmp = a->ref(PARENT_ID).compare(b->ref(PARENT_ID));
155   if (cmp != 0)
156     return cmp < 0;
157 
158   int64 a_position = a->ref(SERVER_POSITION_IN_PARENT);
159   int64 b_position = b->ref(SERVER_POSITION_IN_PARENT);
160   if (a_position != b_position)
161     return a_position < b_position;
162 
163   cmp = a->ref(ID).compare(b->ref(ID));
164   return cmp < 0;
165 }
166 
167 // static
168 bool ParentIdAndHandleIndexer::ShouldInclude(const EntryKernel* a) {
169   // This index excludes deleted items and the root item.  The root
170   // item is excluded so that it doesn't show up as a child of itself.
171   return !a->ref(IS_DEL) && !a->ref(ID).IsRoot();
172 }
173 
174 ///////////////////////////////////////////////////////////////////////////
175 // EntryKernel
176 
177 EntryKernel::EntryKernel() : dirty_(false) {
178   memset(int64_fields, 0, sizeof(int64_fields));
179 }
180 
181 EntryKernel::~EntryKernel() {}
182 
183 namespace {
184 
185 // Utility function to loop through a set of enum values and add the
186 // field keys/values in the kernel to the given dictionary.
187 //
188 // V should be convertible to Value.
189 template <class T, class U, class V>
190 void SetFieldValues(const EntryKernel& kernel,
191                     DictionaryValue* dictionary_value,
192                     const char* (*enum_key_fn)(T),
193                     V* (*enum_value_fn)(U),
194                     int field_key_min, int field_key_max) {
195   DCHECK_LE(field_key_min, field_key_max);
196   for (int i = field_key_min; i <= field_key_max; ++i) {
197     T field = static_cast<T>(i);
198     const std::string& key = enum_key_fn(field);
199     V* value = enum_value_fn(kernel.ref(field));
200     dictionary_value->Set(key, value);
201   }
202 }
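// Usage note: EntryKernel::ToValue() below drives SetFieldValues() over each
// contiguous field range, e.g. for the first block of int64 fields:
//
//   SetFieldValues(*this, kernel_info,
//                  &GetMetahandleFieldString, &Int64ToValue,
//                  INT64_FIELDS_BEGIN, META_HANDLE);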
203 
204 // Helper functions for SetFieldValues().
205 
206 StringValue* Int64ToValue(int64 i) {
207   return Value::CreateStringValue(base::Int64ToString(i));
208 }
209 
210 StringValue* IdToValue(const Id& id) {
211   return id.ToValue();
212 }
213 
214 }  // namespace
215 
216 DictionaryValue* EntryKernel::ToValue() const {
217   DictionaryValue* kernel_info = new DictionaryValue();
218   kernel_info->SetBoolean("isDirty", is_dirty());
219 
220   // Int64 fields.
221   SetFieldValues(*this, kernel_info,
222                  &GetMetahandleFieldString, &Int64ToValue,
223                  INT64_FIELDS_BEGIN, META_HANDLE);
224   SetFieldValues(*this, kernel_info,
225                  &GetBaseVersionString, &Int64ToValue,
226                  META_HANDLE + 1, BASE_VERSION);
227   SetFieldValues(*this, kernel_info,
228                  &GetInt64FieldString, &Int64ToValue,
229                  BASE_VERSION + 1, INT64_FIELDS_END - 1);
230 
231   // ID fields.
232   SetFieldValues(*this, kernel_info,
233                  &GetIdFieldString, &IdToValue,
234                  ID_FIELDS_BEGIN, ID_FIELDS_END - 1);
235 
236   // Bit fields.
237   SetFieldValues(*this, kernel_info,
238                  &GetIndexedBitFieldString, &Value::CreateBooleanValue,
239                  BIT_FIELDS_BEGIN, INDEXED_BIT_FIELDS_END - 1);
240   SetFieldValues(*this, kernel_info,
241                  &GetIsDelFieldString, &Value::CreateBooleanValue,
242                  INDEXED_BIT_FIELDS_END, IS_DEL);
243   SetFieldValues(*this, kernel_info,
244                  &GetBitFieldString, &Value::CreateBooleanValue,
245                  IS_DEL + 1, BIT_FIELDS_END - 1);
246 
247   // String fields.
248   {
249     // Pick out the function overload we want.
250     StringValue* (*string_to_value)(const std::string&) =
251         &Value::CreateStringValue;
252     SetFieldValues(*this, kernel_info,
253                    &GetStringFieldString, string_to_value,
254                    STRING_FIELDS_BEGIN, STRING_FIELDS_END - 1);
255   }
256 
257   // Proto fields.
258   SetFieldValues(*this, kernel_info,
259                  &GetProtoFieldString, &browser_sync::EntitySpecificsToValue,
260                  PROTO_FIELDS_BEGIN, PROTO_FIELDS_END - 1);
261 
262   // Bit temps.
263   SetFieldValues(*this, kernel_info,
264                  &GetBitTempString, &Value::CreateBooleanValue,
265                  BIT_TEMPS_BEGIN, BIT_TEMPS_END - 1);
266 
267   return kernel_info;
268 }
269 
270 ///////////////////////////////////////////////////////////////////////////
271 // Directory
272 
273 void Directory::init_kernel(const std::string& name) {
274   DCHECK(kernel_ == NULL);
275   kernel_ = new Kernel(FilePath(), name, KernelLoadInfo());
276 }
277 
278 Directory::PersistedKernelInfo::PersistedKernelInfo()
279     : next_id(0) {
280   for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
281     reset_download_progress(ModelTypeFromInt(i));
282   }
283   autofill_migration_state = NOT_DETERMINED;
284   memset(&autofill_migration_debug_info, 0,
285          sizeof(autofill_migration_debug_info));
286 }
287 
288 Directory::PersistedKernelInfo::~PersistedKernelInfo() {}
289 
290 void Directory::PersistedKernelInfo::reset_download_progress(
291     ModelType model_type) {
292   download_progress[model_type].set_data_type_id(
293       GetExtensionFieldNumberFromModelType(model_type));
294   // An empty-string token indicates no prior knowledge.
295   download_progress[model_type].set_token(std::string());
296 }
297 
298 Directory::SaveChangesSnapshot::SaveChangesSnapshot()
299     : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
300 }
301 
302 Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {}
303 
304 Directory::Kernel::Kernel(const FilePath& db_path,
305                           const string& name,
306                           const KernelLoadInfo& info)
307     : db_path(db_path),
308       refcount(1),
309       name(name),
310       metahandles_index(new Directory::MetahandlesIndex),
311       ids_index(new Directory::IdsIndex),
312       parent_id_child_index(new Directory::ParentIdChildIndex),
313       client_tag_index(new Directory::ClientTagIndex),
314       unapplied_update_metahandles(new MetahandleSet),
315       unsynced_metahandles(new MetahandleSet),
316       dirty_metahandles(new MetahandleSet),
317       metahandles_to_purge(new MetahandleSet),
318       channel(new Directory::Channel(syncable::DIRECTORY_DESTROYED)),
319       change_listener_(NULL),
320       info_status(Directory::KERNEL_SHARE_INFO_VALID),
321       persisted_info(info.kernel_info),
322       cache_guid(info.cache_guid),
323       next_metahandle(info.max_metahandle + 1) {
324 }
325 
326 void Directory::Kernel::AddRef() {
327   base::subtle::NoBarrier_AtomicIncrement(&refcount, 1);
328 }
329 
330 void Directory::Kernel::Release() {
331   if (!base::subtle::NoBarrier_AtomicIncrement(&refcount, -1))
332     delete this;
333 }
334 
335 Directory::Kernel::~Kernel() {
336   CHECK_EQ(0, refcount);
337   delete channel;
338   delete unsynced_metahandles;
339   delete unapplied_update_metahandles;
340   delete dirty_metahandles;
341   delete metahandles_to_purge;
342   delete parent_id_child_index;
343   delete client_tag_index;
344   delete ids_index;
345   STLDeleteElements(metahandles_index);
346   delete metahandles_index;
347 }
348 
349 Directory::Directory() : kernel_(NULL), store_(NULL) {
350 }
351 
352 Directory::~Directory() {
353   Close();
354 }
355 
356 DirOpenResult Directory::Open(const FilePath& file_path, const string& name) {
357   const DirOpenResult result = OpenImpl(file_path, name);
358   if (OPENED != result)
359     Close();
360   return result;
361 }
362 
363 void Directory::InitializeIndices() {
364   MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
365   for (; it != kernel_->metahandles_index->end(); ++it) {
366     EntryKernel* entry = *it;
367     InitializeIndexEntry<ParentIdAndHandleIndexer>(entry,
368         kernel_->parent_id_child_index);
369     InitializeIndexEntry<IdIndexer>(entry, kernel_->ids_index);
370     InitializeIndexEntry<ClientTagIndexer>(entry, kernel_->client_tag_index);
371     if (entry->ref(IS_UNSYNCED))
372       kernel_->unsynced_metahandles->insert(entry->ref(META_HANDLE));
373     if (entry->ref(IS_UNAPPLIED_UPDATE))
374       kernel_->unapplied_update_metahandles->insert(entry->ref(META_HANDLE));
375     DCHECK(!entry->is_dirty());
376   }
377 }
378 
379 DirectoryBackingStore* Directory::CreateBackingStore(
380     const string& dir_name, const FilePath& backing_filepath) {
381   return new DirectoryBackingStore(dir_name, backing_filepath);
382 }
383 
384 DirOpenResult Directory::OpenImpl(const FilePath& file_path,
385                                   const string& name) {
386   DCHECK_EQ(static_cast<DirectoryBackingStore*>(NULL), store_);
387   FilePath db_path(file_path);
388   file_util::AbsolutePath(&db_path);
389   store_ = CreateBackingStore(name, db_path);
390 
391   KernelLoadInfo info;
392   // Temporary indices before kernel_ is initialized, in case Load fails.  We
393   // O(1)-swap these later.
394   MetahandlesIndex metas_bucket;
395   DirOpenResult result = store_->Load(&metas_bucket, &info);
396   if (OPENED != result)
397     return result;
398 
399   kernel_ = new Kernel(db_path, name, info);
400   kernel_->metahandles_index->swap(metas_bucket);
401   InitializeIndices();
402   return OPENED;
403 }
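// Illustrative call site (a sketch; the path and share name are hypothetical):
//
//   Directory dir;
//   if (dir.Open(FilePath(FILE_PATH_LITERAL("SyncData.sqlite3")),
//                "user_share") != OPENED) {
//     // Open() has already called Close(); the directory is unusable.
//   }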
404 
405 void Directory::Close() {
406   if (store_)
407     delete store_;
408   store_ = NULL;
409   if (kernel_) {
410     bool del = !base::subtle::NoBarrier_AtomicIncrement(&kernel_->refcount, -1);
411     DCHECK(del) << "Kernel should only have a single ref";
412     if (del)
413       delete kernel_;
414     kernel_ = NULL;
415   }
416 }
417 
418 EntryKernel* Directory::GetEntryById(const Id& id) {
419   ScopedKernelLock lock(this);
420   return GetEntryById(id, &lock);
421 }
422 
423 EntryKernel* Directory::GetEntryById(const Id& id,
424                                      ScopedKernelLock* const lock) {
425   DCHECK(kernel_);
426   // Find it in the in memory ID index.
427   kernel_->needle.put(ID, id);
428   IdsIndex::iterator id_found = kernel_->ids_index->find(&kernel_->needle);
429   if (id_found != kernel_->ids_index->end()) {
430     return *id_found;
431   }
432   return NULL;
433 }
434 
435 EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
436   ScopedKernelLock lock(this);
437   DCHECK(kernel_);
438   // Find it in the ClientTagIndex.
439   kernel_->needle.put(UNIQUE_CLIENT_TAG, tag);
440   ClientTagIndex::iterator found = kernel_->client_tag_index->find(
441       &kernel_->needle);
442   if (found != kernel_->client_tag_index->end()) {
443     return *found;
444   }
445   return NULL;
446 }
447 
448 EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
449   ScopedKernelLock lock(this);
450   DCHECK(kernel_);
451   // We don't currently keep a separate index for the tags.  Since tags
452   // only exist for server created items that are the first items
453   // to be created in a store, they should have small metahandles.
454   // So, we just iterate over the items in sorted metahandle order,
455   // looking for a match.
456   MetahandlesIndex& set = *kernel_->metahandles_index;
457   for (MetahandlesIndex::iterator i = set.begin(); i != set.end(); ++i) {
458     if ((*i)->ref(UNIQUE_SERVER_TAG) == tag) {
459       return *i;
460     }
461   }
462   return NULL;
463 }
464 
465 EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
466   ScopedKernelLock lock(this);
467   return GetEntryByHandle(metahandle, &lock);
468 }
469 
470 EntryKernel* Directory::GetEntryByHandle(int64 metahandle,
471                                          ScopedKernelLock* lock) {
472   // Look up in memory
473   kernel_->needle.put(META_HANDLE, metahandle);
474   MetahandlesIndex::iterator found =
475       kernel_->metahandles_index->find(&kernel_->needle);
476   if (found != kernel_->metahandles_index->end()) {
477     // Found it in memory.  Easy.
478     return *found;
479   }
480   return NULL;
481 }
482 
483 void Directory::GetChildHandles(BaseTransaction* trans, const Id& parent_id,
484                                 Directory::ChildHandles* result) {
485   CHECK(this == trans->directory());
486   result->clear();
487   {
488     ScopedKernelLock lock(this);
489 
490     typedef ParentIdChildIndex::iterator iterator;
491     for (iterator i = GetParentChildIndexLowerBound(lock, parent_id),
492                 end = GetParentChildIndexUpperBound(lock, parent_id);
493          i != end; ++i) {
494       DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
495       result->push_back((*i)->ref(META_HANDLE));
496     }
497   }
498 }
499 
500 EntryKernel* Directory::GetRootEntry() {
501   return GetEntryById(Id());
502 }
503 
504 void ZeroFields(EntryKernel* entry, int first_field) {
505   int i = first_field;
506   // Note that bitset<> constructor sets all bits to zero, and strings
507   // initialize to empty.
508   for ( ; i < INT64_FIELDS_END; ++i)
509     entry->put(static_cast<Int64Field>(i), 0);
510   for ( ; i < ID_FIELDS_END; ++i)
511     entry->mutable_ref(static_cast<IdField>(i)).Clear();
512   for ( ; i < BIT_FIELDS_END; ++i)
513     entry->put(static_cast<BitField>(i), false);
514   if (i < PROTO_FIELDS_END)
515     i = PROTO_FIELDS_END;
516   entry->clear_dirty(NULL);
517 }
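// Note: ZeroFields() relies on the field enums being laid out contiguously
// (INT64_FIELDS, then ID_FIELDS, then BIT_FIELDS, ...), which is why a single
// running index can fall through each range in turn.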
518 
519 void Directory::InsertEntry(EntryKernel* entry) {
520   ScopedKernelLock lock(this);
521   InsertEntry(entry, &lock);
522 }
523 
524 void Directory::InsertEntry(EntryKernel* entry, ScopedKernelLock* lock) {
525   DCHECK(NULL != lock);
526   CHECK(NULL != entry);
527   static const char error[] = "Entry already in memory index.";
528   CHECK(kernel_->metahandles_index->insert(entry).second) << error;
529 
530   if (!entry->ref(IS_DEL)) {
531     CHECK(kernel_->parent_id_child_index->insert(entry).second) << error;
532   }
533   CHECK(kernel_->ids_index->insert(entry).second) << error;
534 
535   // Should NEVER be created with a client tag.
536   CHECK(entry->ref(UNIQUE_CLIENT_TAG).empty());
537 }
538 
539 bool Directory::ReindexId(EntryKernel* const entry, const Id& new_id) {
540   ScopedKernelLock lock(this);
541   if (NULL != GetEntryById(new_id, &lock))
542     return false;
543 
544   {
545     // Update the indices that depend on the ID field.
546     ScopedIndexUpdater<IdIndexer> updater_a(lock, entry, kernel_->ids_index);
547     ScopedIndexUpdater<ParentIdAndHandleIndexer> updater_b(lock, entry,
548         kernel_->parent_id_child_index);
549     entry->put(ID, new_id);
550   }
551   return true;
552 }
553 
554 void Directory::ReindexParentId(EntryKernel* const entry,
555                                 const Id& new_parent_id) {
556   ScopedKernelLock lock(this);
557 
558   {
559     // Update the indices that depend on the PARENT_ID field.
560     ScopedIndexUpdater<ParentIdAndHandleIndexer> index_updater(lock, entry,
561         kernel_->parent_id_child_index);
562     entry->put(PARENT_ID, new_parent_id);
563   }
564 }
565 
566 void Directory::ClearDirtyMetahandles() {
567   kernel_->transaction_mutex.AssertAcquired();
568   kernel_->dirty_metahandles->clear();
569 }
570 
571 bool Directory::SafeToPurgeFromMemory(const EntryKernel* const entry) const {
572   bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
573       !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
574       !entry->ref(IS_UNSYNCED);
575 
576   if (safe) {
577     int64 handle = entry->ref(META_HANDLE);
578     CHECK_EQ(kernel_->dirty_metahandles->count(handle), 0U);
579     // TODO(tim): Bug 49278.
580     CHECK(!kernel_->unsynced_metahandles->count(handle));
581     CHECK(!kernel_->unapplied_update_metahandles->count(handle));
582   }
583 
584   return safe;
585 }
586 
587 void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
588   ReadTransaction trans(this, __FILE__, __LINE__);
589   ScopedKernelLock lock(this);
590   // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
591   // clear dirty flags.
592 
593   for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles->begin();
594        i != kernel_->dirty_metahandles->end(); ++i) {
595     EntryKernel* entry = GetEntryByHandle(*i, &lock);
596     if (!entry)
597       continue;
598     // Skip over false positives; it happens relatively infrequently.
599     if (!entry->is_dirty())
600       continue;
601     snapshot->dirty_metas.insert(snapshot->dirty_metas.end(), *entry);
602     DCHECK_EQ(1U, kernel_->dirty_metahandles->count(*i));
603     // We don't bother removing from the index here as we blow the entire thing
604     // in a moment, and it unnecessarily complicates iteration.
605     entry->clear_dirty(NULL);
606   }
607   ClearDirtyMetahandles();
608 
609   // Set purged handles.
610   DCHECK(snapshot->metahandles_to_purge.empty());
611   snapshot->metahandles_to_purge.swap(*(kernel_->metahandles_to_purge));
612 
613   // Fill kernel_info_status and kernel_info.
614   snapshot->kernel_info = kernel_->persisted_info;
615   // To avoid duplicates when the process crashes, we record a next_id of
616   // greater magnitude than could possibly be reached before the next
617   // SaveChanges.  In other words, it's effectively impossible for the user to
618   // generate 65536 new bookmarks in 3 seconds.
619   snapshot->kernel_info.next_id -= 65536;
620   snapshot->kernel_info_status = kernel_->info_status;
621   // This one we reset on failure.
622   kernel_->info_status = KERNEL_SHARE_INFO_VALID;
623 }
624 
625 bool Directory::SaveChanges() {
626   bool success = false;
627   DCHECK(store_);
628 
629   base::AutoLock scoped_lock(kernel_->save_changes_mutex);
630 
631   // Snapshot and save.
632   SaveChangesSnapshot snapshot;
633   TakeSnapshotForSaveChanges(&snapshot);
634   success = store_->SaveChanges(snapshot);
635 
636   // Handle success or failure.
637   if (success)
638     VacuumAfterSaveChanges(snapshot);
639   else
640     HandleSaveChangesFailure(snapshot);
641   return success;
642 }
643 
644 void Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
645   // Need a write transaction as we are about to permanently purge entries.
646   WriteTransaction trans(this, VACUUM_AFTER_SAVE, __FILE__, __LINE__);
647   ScopedKernelLock lock(this);
648   kernel_->flushed_metahandles.Push(0);  // Begin flush marker
649   // Now drop everything we can out of memory.
650   for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin();
651        i != snapshot.dirty_metas.end(); ++i) {
652     kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE));
653     MetahandlesIndex::iterator found =
654         kernel_->metahandles_index->find(&kernel_->needle);
655     EntryKernel* entry = (found == kernel_->metahandles_index->end() ?
656                           NULL : *found);
657     if (entry && SafeToPurgeFromMemory(entry)) {
658       // We now drop deleted metahandles that are up to date on both the client
659       // and the server.
660       size_t num_erased = 0;
661       int64 handle = entry->ref(META_HANDLE);
662       kernel_->flushed_metahandles.Push(handle);
663       num_erased = kernel_->ids_index->erase(entry);
664       DCHECK_EQ(1u, num_erased);
665       num_erased = kernel_->metahandles_index->erase(entry);
666       DCHECK_EQ(1u, num_erased);
667 
668       // The entry might not be in the client tag index.
669       num_erased = kernel_->client_tag_index->erase(entry);
670       DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
671       CHECK(!kernel_->parent_id_child_index->count(entry));
672       delete entry;
673     }
674   }
675 }
676 
677 void Directory::PurgeEntriesWithTypeIn(const std::set<ModelType>& types) {
678   if (types.count(UNSPECIFIED) != 0U || types.count(TOP_LEVEL_FOLDER) != 0U) {
679     NOTREACHED() << "Don't support purging unspecified or top level entries.";
680     return;
681   }
682 
683   if (types.empty())
684     return;
685 
686   {
687     WriteTransaction trans(this, PURGE_ENTRIES, __FILE__, __LINE__);
688     {
689       ScopedKernelLock lock(this);
690       MetahandlesIndex::iterator it = kernel_->metahandles_index->begin();
691       while (it != kernel_->metahandles_index->end()) {
692         const sync_pb::EntitySpecifics& local_specifics = (*it)->ref(SPECIFICS);
693         const sync_pb::EntitySpecifics& server_specifics =
694             (*it)->ref(SERVER_SPECIFICS);
695         ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
696         ModelType server_type = GetModelTypeFromSpecifics(server_specifics);
697 
698         // Note the dance around incrementing |it|, since we sometimes erase().
699         if (types.count(local_type) > 0 || types.count(server_type) > 0) {
700           UnlinkEntryFromOrder(*it, NULL, &lock);
701           int64 handle = (*it)->ref(META_HANDLE);
702           kernel_->metahandles_to_purge->insert(handle);
703 
704           size_t num_erased = 0;
705           EntryKernel* entry = *it;
706           num_erased = kernel_->ids_index->erase(entry);
707           DCHECK_EQ(1u, num_erased);
708           num_erased = kernel_->client_tag_index->erase(entry);
709           DCHECK_EQ(entry->ref(UNIQUE_CLIENT_TAG).empty(), !num_erased);
710           num_erased = kernel_->unsynced_metahandles->erase(handle);
711           DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
712           num_erased = kernel_->unapplied_update_metahandles->erase(handle);
713           DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
714           num_erased = kernel_->parent_id_child_index->erase(entry);
715           DCHECK_EQ(entry->ref(IS_DEL), !num_erased);
716           kernel_->metahandles_index->erase(it++);
717           delete entry;
718         } else {
719           ++it;
720         }
721       }
722 
723       // Ensure meta tracking for these data types reflects the deleted state.
724       for (std::set<ModelType>::const_iterator it = types.begin();
725            it != types.end(); ++it) {
726         set_initial_sync_ended_for_type_unsafe(*it, false);
727         kernel_->persisted_info.reset_download_progress(*it);
728       }
729     }
730   }
731 }
732 
733 void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
734   ScopedKernelLock lock(this);
735   kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
736 
737   // Because we optimistically cleared the dirty bit on the real entries when
738   // taking the snapshot, we must restore it on failure.  Not doing this could
739   // cause lost data, if no other changes are made to the in-memory entries
740   // that would cause the dirty bit to get set again. Setting the bit ensures
741   // that SaveChanges will at least try again later.
742   for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin();
743        i != snapshot.dirty_metas.end(); ++i) {
744     kernel_->needle.put(META_HANDLE, i->ref(META_HANDLE));
745     MetahandlesIndex::iterator found =
746         kernel_->metahandles_index->find(&kernel_->needle);
747     if (found != kernel_->metahandles_index->end()) {
748       (*found)->mark_dirty(kernel_->dirty_metahandles);
749     }
750   }
751 
752   kernel_->metahandles_to_purge->insert(snapshot.metahandles_to_purge.begin(),
753                                         snapshot.metahandles_to_purge.end());
754 }
755 
756 void Directory::GetDownloadProgress(
757     ModelType model_type,
758     sync_pb::DataTypeProgressMarker* value_out) const {
759   ScopedKernelLock lock(this);
760   return value_out->CopyFrom(
761       kernel_->persisted_info.download_progress[model_type]);
762 }
763 
764 void Directory::GetDownloadProgressAsString(
765     ModelType model_type,
766     std::string* value_out) const {
767   ScopedKernelLock lock(this);
768   kernel_->persisted_info.download_progress[model_type].SerializeToString(
769       value_out);
770 }
771 
772 void Directory::SetDownloadProgress(
773     ModelType model_type,
774     const sync_pb::DataTypeProgressMarker& new_progress) {
775   ScopedKernelLock lock(this);
776   kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
777   kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
778 }
779 
780 bool Directory::initial_sync_ended_for_type(ModelType type) const {
781   ScopedKernelLock lock(this);
782   return kernel_->persisted_info.initial_sync_ended[type];
783 }
784 
785 AutofillMigrationState Directory::get_autofill_migration_state() const {
786   ScopedKernelLock lock(this);
787   return kernel_->persisted_info.autofill_migration_state;
788 }
789 
790 AutofillMigrationDebugInfo
791     Directory::get_autofill_migration_debug_info() const {
792   ScopedKernelLock lock(this);
793   return kernel_->persisted_info.autofill_migration_debug_info;
794 }
795 
796 template <class T> void Directory::TestAndSet(
797     T* kernel_data, const T* data_to_set) {
798   if (*kernel_data != *data_to_set) {
799     *kernel_data = *data_to_set;
800     kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
801   }
802 }
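// Usage note: TestAndSet() backs the autofill-migration setters below, e.g.
//
//   TestAndSet<int64>(&debug_info.autofill_migration_time,
//                     &info.autofill_migration_time);
//
// so info_status is only marked dirty when a value actually changes.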
803 
804 void Directory::set_autofill_migration_state_debug_info(
805     AutofillMigrationDebugInfo::PropertyToSet property_to_set,
806     const AutofillMigrationDebugInfo& info) {
807 
808   ScopedKernelLock lock(this);
809   switch (property_to_set) {
810     case AutofillMigrationDebugInfo::MIGRATION_TIME: {
811       syncable::AutofillMigrationDebugInfo&
812         debug_info = kernel_->persisted_info.autofill_migration_debug_info;
813       TestAndSet<int64>(
814           &debug_info.autofill_migration_time,
815           &info.autofill_migration_time);
816       break;
817     }
818     case AutofillMigrationDebugInfo::ENTRIES_ADDED: {
819       AutofillMigrationDebugInfo& debug_info =
820         kernel_->persisted_info.autofill_migration_debug_info;
821       TestAndSet<int>(
822           &debug_info.autofill_entries_added_during_migration,
823           &info.autofill_entries_added_during_migration);
824       break;
825     }
826     case AutofillMigrationDebugInfo::PROFILES_ADDED: {
827       AutofillMigrationDebugInfo& debug_info =
828         kernel_->persisted_info.autofill_migration_debug_info;
829       TestAndSet<int>(
830           &debug_info.autofill_profile_added_during_migration,
831           &info.autofill_profile_added_during_migration);
832       break;
833     }
834     default:
835       NOTREACHED();
836   }
837 }
838 
839 void Directory::set_autofill_migration_state(AutofillMigrationState state) {
840   ScopedKernelLock lock(this);
841   if (state == kernel_->persisted_info.autofill_migration_state) {
842     return;
843   }
844   kernel_->persisted_info.autofill_migration_state = state;
845   if (state == MIGRATED) {
846     syncable::AutofillMigrationDebugInfo& debug_info =
847         kernel_->persisted_info.autofill_migration_debug_info;
848     debug_info.autofill_migration_time =
849         base::Time::Now().ToInternalValue();
850   }
851   kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
852 }
853 
854 void Directory::set_initial_sync_ended_for_type(ModelType type, bool x) {
855   ScopedKernelLock lock(this);
856   set_initial_sync_ended_for_type_unsafe(type, x);
857 }
858 
859 void Directory::set_initial_sync_ended_for_type_unsafe(ModelType type,
860                                                        bool x) {
861   if (kernel_->persisted_info.initial_sync_ended[type] == x)
862     return;
863   kernel_->persisted_info.initial_sync_ended.set(type, x);
864   kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
865 }
866 
867 void Directory::SetNotificationStateUnsafe(
868     const std::string& notification_state) {
869   if (notification_state == kernel_->persisted_info.notification_state)
870     return;
871   kernel_->persisted_info.notification_state = notification_state;
872   kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
873 }
874 
875 string Directory::store_birthday() const {
876   ScopedKernelLock lock(this);
877   return kernel_->persisted_info.store_birthday;
878 }
879 
880 void Directory::set_store_birthday(const string& store_birthday) {
881   ScopedKernelLock lock(this);
882   if (kernel_->persisted_info.store_birthday == store_birthday)
883     return;
884   kernel_->persisted_info.store_birthday = store_birthday;
885   kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
886 }
887 
888 std::string Directory::GetAndClearNotificationState() {
889   ScopedKernelLock lock(this);
890   std::string notification_state = kernel_->persisted_info.notification_state;
891   SetNotificationStateUnsafe(std::string());
892   return notification_state;
893 }
894 
895 void Directory::SetNotificationState(const std::string& notification_state) {
896   ScopedKernelLock lock(this);
897   SetNotificationStateUnsafe(notification_state);
898 }
899 
900 string Directory::cache_guid() const {
901   // No need to lock since nothing ever writes to it after load.
902   return kernel_->cache_guid;
903 }
904 
905 void Directory::GetAllMetaHandles(BaseTransaction* trans,
906                                   MetahandleSet* result) {
907   result->clear();
908   ScopedKernelLock lock(this);
909   MetahandlesIndex::iterator i;
910   for (i = kernel_->metahandles_index->begin();
911        i != kernel_->metahandles_index->end();
912        ++i) {
913     result->insert((*i)->ref(META_HANDLE));
914   }
915 }
916 
917 void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
918                                        UnsyncedMetaHandles* result) {
919   result->clear();
920   ScopedKernelLock lock(this);
921   copy(kernel_->unsynced_metahandles->begin(),
922        kernel_->unsynced_metahandles->end(), back_inserter(*result));
923 }
924 
925 int64 Directory::unsynced_entity_count() const {
926   ScopedKernelLock lock(this);
927   return kernel_->unsynced_metahandles->size();
928 }
929 
930 void Directory::GetUnappliedUpdateMetaHandles(BaseTransaction* trans,
931     UnappliedUpdateMetaHandles* result) {
932   result->clear();
933   ScopedKernelLock lock(this);
934   copy(kernel_->unapplied_update_metahandles->begin(),
935        kernel_->unapplied_update_metahandles->end(),
936        back_inserter(*result));
937 }
938 
939 
940 class IdFilter {
941  public:
942   virtual ~IdFilter() { }
943   virtual bool ShouldConsider(const Id& id) const = 0;
944 };
945 
946 
947 class FullScanFilter : public IdFilter {
948  public:
949   virtual bool ShouldConsider(const Id& id) const {
950     return true;
951   }
952 };
953 
954 class SomeIdsFilter : public IdFilter {
955  public:
956   virtual bool ShouldConsider(const Id& id) const {
957     return binary_search(ids_.begin(), ids_.end(), id);
958   }
959   std::vector<Id> ids_;
960 };
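// Note: SomeIdsFilter::ShouldConsider() uses binary_search, so ids_ must be
// sorted before use; both CheckTreeInvariants() overloads below sort the
// vector after filling it.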
961 
962 void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
963                                     const OriginalEntries* originals) {
964   MetahandleSet handles;
965   SomeIdsFilter filter;
966   filter.ids_.reserve(originals->size());
967   for (OriginalEntries::const_iterator i = originals->begin(),
968          end = originals->end(); i != end; ++i) {
969     Entry e(trans, GET_BY_HANDLE, i->ref(META_HANDLE));
970     CHECK(e.good());
971     filter.ids_.push_back(e.Get(ID));
972     handles.insert(i->ref(META_HANDLE));
973   }
974   std::sort(filter.ids_.begin(), filter.ids_.end());
975   CheckTreeInvariants(trans, handles, filter);
976 }
977 
978 void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
979                                     bool full_scan) {
980   // TODO(timsteele):  This is called every time a WriteTransaction finishes.
981   // The performance hit is substantial given that we now examine every single
982   // syncable entry.  Need to redesign this.
983   MetahandleSet handles;
984   GetAllMetaHandles(trans, &handles);
985   if (full_scan) {
986     FullScanFilter fullfilter;
987     CheckTreeInvariants(trans, handles, fullfilter);
988   } else {
989     SomeIdsFilter filter;
990     MetahandleSet::iterator i;
991     for (i = handles.begin() ; i != handles.end() ; ++i) {
992       Entry e(trans, GET_BY_HANDLE, *i);
993       CHECK(e.good());
994       filter.ids_.push_back(e.Get(ID));
995     }
996     sort(filter.ids_.begin(), filter.ids_.end());
997     CheckTreeInvariants(trans, handles, filter);
998   }
999 }
1000 
1001 void Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
1002                                     const MetahandleSet& handles,
1003                                     const IdFilter& idfilter) {
1004   const int64 max_ms = kInvariantCheckMaxMs;
1005   PerfTimer check_timer;
1006   MetahandleSet::const_iterator i;
1007   int entries_done = 0;
1008   for (i = handles.begin() ; i != handles.end() ; ++i) {
1009     int64 metahandle = *i;
1010     Entry e(trans, GET_BY_HANDLE, metahandle);
1011     CHECK(e.good());
1012     syncable::Id id = e.Get(ID);
1013     syncable::Id parentid = e.Get(PARENT_ID);
1014 
1015     if (id.IsRoot()) {
1016       CHECK(e.Get(IS_DIR)) << e;
1017       CHECK(parentid.IsRoot()) << e;
1018       CHECK(!e.Get(IS_UNSYNCED)) << e;
1019       ++entries_done;
1020       continue;
1021     }
1022 
1023     if (!e.Get(IS_DEL)) {
1024       CHECK(id != parentid) << e;
1025       CHECK(!e.Get(NON_UNIQUE_NAME).empty()) << e;
1026       int safety_count = handles.size() + 1;
1027       while (!parentid.IsRoot()) {
1028         if (!idfilter.ShouldConsider(parentid))
1029           break;
1030         Entry parent(trans, GET_BY_ID, parentid);
1031         CHECK(parent.good()) << e;
1032         CHECK(parent.Get(IS_DIR)) << parent << e;
1033         CHECK(!parent.Get(IS_DEL)) << parent << e;
1034         CHECK(handles.end() != handles.find(parent.Get(META_HANDLE)))
1035             << e << parent;
1036         parentid = parent.Get(PARENT_ID);
1037         CHECK_GE(--safety_count, 0) << e << parent;
1038       }
1039     }
1040     int64 base_version = e.Get(BASE_VERSION);
1041     int64 server_version = e.Get(SERVER_VERSION);
1042     bool using_unique_client_tag = !e.Get(UNIQUE_CLIENT_TAG).empty();
1043     if (CHANGES_VERSION == base_version || 0 == base_version) {
1044       if (e.Get(IS_UNAPPLIED_UPDATE)) {
1045         // Must be a new item, or a de-duplicated unique client tag
1046         // that was created both locally and remotely.
1047         if (!using_unique_client_tag) {
1048           CHECK(e.Get(IS_DEL)) << e;
1049         }
1050         // It came from the server, so it must have a server ID.
1051         CHECK(id.ServerKnows()) << e;
1052       } else {
1053         if (e.Get(IS_DIR)) {
1054           // TODO(chron): Implement this mode if clients ever need it.
1055           // For now, you can't combine a client tag and a directory.
1056           CHECK(!using_unique_client_tag) << e;
1057         }
1058         // Should be an uncommitted item, or a successfully deleted one.
1059         if (!e.Get(IS_DEL)) {
1060           CHECK(e.Get(IS_UNSYNCED)) << e;
1061         }
1062         // If the next check failed, it would imply that an item exists
1063         // on the server, isn't waiting for application locally, but either
1064         // is an unsynced create or a successful delete in the local copy.
1065         // Either way, that's a mismatch.
1066         CHECK_EQ(0, server_version) << e;
1067         // Items that aren't using the unique client tag should have a zero
1068         // base version only if they have a local ID.  Items with unique client
1069         // tags are allowed to use the zero base version for undeletion and
1070         // de-duplication; the unique client tag trumps the server ID.
1071         if (!using_unique_client_tag) {
1072           CHECK(!id.ServerKnows()) << e;
1073         }
1074       }
1075     } else {
1076       CHECK(id.ServerKnows());
1077     }
1078     ++entries_done;
1079     int64 elapsed_ms = check_timer.Elapsed().InMilliseconds();
1080     if (elapsed_ms > max_ms) {
1081       VLOG(1) << "Cutting Invariant check short after " << elapsed_ms
1082               << "ms. Processed " << entries_done << "/" << handles.size()
1083               << " entries";
1084       return;
1085     }
1086   }
1087 }
1088 
1089 void Directory::SetChangeListener(DirectoryChangeListener* listener) {
1090   DCHECK(!kernel_->change_listener_);
1091   kernel_->change_listener_ = listener;
1092 }
1093 
1094 ///////////////////////////////////////////////////////////////////////////////
1095 // ScopedKernelLock
1096 
1097 ScopedKernelLock::ScopedKernelLock(const Directory* dir)
1098   :  scoped_lock_(dir->kernel_->mutex), dir_(const_cast<Directory*>(dir)) {
1099 }
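// Design note: a ScopedKernelLock both acquires the kernel mutex and acts as
// a "proof of lock" token -- helpers such as ScopedIndexUpdater and the
// private GetEntryById(id, &lock) overload take one as a parameter so they
// can only be reached while the lock is held.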
1100 
1101 ///////////////////////////////////////////////////////////////////////////
1102 // Transactions
1103 
1104 void BaseTransaction::Lock() {
1105   base::TimeTicks start_time = base::TimeTicks::Now();
1106 
1107   dirkernel_->transaction_mutex.Acquire();
1108 
1109   time_acquired_ = base::TimeTicks::Now();
1110   const base::TimeDelta elapsed = time_acquired_ - start_time;
1111   if (LOG_IS_ON(INFO) &&
1112       (1 <= logging::GetVlogLevelHelper(
1113           source_file_, ::strlen(source_file_))) &&
1114       (elapsed.InMilliseconds() > 200)) {
1115     logging::LogMessage(source_file_, line_, logging::LOG_INFO).stream()
1116       << name_ << " transaction waited "
1117       << elapsed.InSecondsF() << " seconds.";
1118   }
1119 }
1120 
1121 BaseTransaction::BaseTransaction(Directory* directory, const char* name,
1122     const char* source_file, int line, WriterTag writer)
1123   : directory_(directory), dirkernel_(directory->kernel_), name_(name),
1124     source_file_(source_file), line_(line), writer_(writer) {
1125   Lock();
1126 }
1127 
1128 BaseTransaction::BaseTransaction(Directory* directory)
1129     : directory_(directory),
1130       dirkernel_(NULL),
1131       name_(NULL),
1132       source_file_(NULL),
1133       line_(0),
1134       writer_(INVALID) {
1135 }
1136 
1137 BaseTransaction::~BaseTransaction() {}
1138 
1139 void BaseTransaction::UnlockAndLog(OriginalEntries* entries) {
1140   // Work done while the transaction mutex is held.
1141   ModelTypeBitSet models_with_changes;
1142   if (!NotifyTransactionChangingAndEnding(entries, &models_with_changes))
1143     return;
1144 
1145   // Work done after the mutex is released.
1146   NotifyTransactionComplete(models_with_changes);
1147 }
1148 
1149 bool BaseTransaction::NotifyTransactionChangingAndEnding(
1150     OriginalEntries* entries,
1151     ModelTypeBitSet* models_with_changes) {
1152   dirkernel_->transaction_mutex.AssertAcquired();
1153 
1154   scoped_ptr<OriginalEntries> originals(entries);
1155   const base::TimeDelta elapsed = base::TimeTicks::Now() - time_acquired_;
1156   if (LOG_IS_ON(INFO) &&
1157       (1 <= logging::GetVlogLevelHelper(
1158           source_file_, ::strlen(source_file_))) &&
1159       (elapsed.InMilliseconds() > 50)) {
1160     logging::LogMessage(source_file_, line_, logging::LOG_INFO).stream()
1161         << name_ << " transaction completed in " << elapsed.InSecondsF()
1162         << " seconds.";
1163   }
1164 
1165   if (NULL == originals.get() || originals->empty() ||
1166       !dirkernel_->change_listener_) {
1167     dirkernel_->transaction_mutex.Release();
1168     return false;
1169   }
1170 
1171   if (writer_ == syncable::SYNCAPI) {
1172     dirkernel_->change_listener_->HandleCalculateChangesChangeEventFromSyncApi(
1173         *originals.get(),
1174         writer_,
1175         this);
1176   } else {
1177     dirkernel_->change_listener_->HandleCalculateChangesChangeEventFromSyncer(
1178         *originals.get(),
1179         writer_,
1180         this);
1181   }
1182 
1183   *models_with_changes = dirkernel_->change_listener_->
1184       HandleTransactionEndingChangeEvent(this);
1185 
1186   // Release the transaction.  Note that once the transaction is released,
1187   // this thread can be interrupted by another that was waiting for the
1188   // transaction, and that thread may then follow this same code path
1189   // concurrently with this one.  From this point forward, only local state
1190   // can be touched.
1191   dirkernel_->transaction_mutex.Release();
1192   return true;
1193 }
1194 
1195 void BaseTransaction::NotifyTransactionComplete(
1196     ModelTypeBitSet models_with_changes) {
1197   dirkernel_->change_listener_->HandleTransactionCompleteChangeEvent(
1198       models_with_changes);
1199 }
1200 
1201 ReadTransaction::ReadTransaction(Directory* directory, const char* file,
1202                                  int line)
1203   : BaseTransaction(directory, "Read", file, line, INVALID) {
1204 }
1205 
1206 ReadTransaction::ReadTransaction(const ScopedDirLookup& scoped_dir,
1207                                  const char* file, int line)
1208   : BaseTransaction(scoped_dir.operator -> (), "Read", file, line, INVALID) {
1209 }
1210 
1211 ReadTransaction::~ReadTransaction() {
1212   UnlockAndLog(NULL);
1213 }
1214 
1215 WriteTransaction::WriteTransaction(Directory* directory, WriterTag writer,
1216                                    const char* file, int line)
1217   : BaseTransaction(directory, "Write", file, line, writer),
1218     originals_(new OriginalEntries) {
1219 }
1220 
1221 WriteTransaction::WriteTransaction(const ScopedDirLookup& scoped_dir,
1222                                    WriterTag writer, const char* file, int line)
1223   : BaseTransaction(scoped_dir.operator -> (), "Write", file, line, writer),
1224     originals_(new OriginalEntries) {
1225 }
1226 
1227 WriteTransaction::WriteTransaction(Directory *directory)
1228     : BaseTransaction(directory),
1229       originals_(new OriginalEntries) {
1230 }
1231 
1232 void WriteTransaction::SaveOriginal(EntryKernel* entry) {
1233   if (NULL == entry)
1234     return;
1235   OriginalEntries::iterator i = originals_->lower_bound(*entry);
1236   if (i == originals_->end() ||
1237       i->ref(META_HANDLE) != entry->ref(META_HANDLE)) {
1238     originals_->insert(i, *entry);
1239   }
1240 }
1241 
1242 WriteTransaction::~WriteTransaction() {
1243   if (OFF != kInvariantCheckLevel) {
1244     const bool full_scan = (FULL_DB_VERIFICATION == kInvariantCheckLevel);
1245     if (full_scan)
1246       directory()->CheckTreeInvariants(this, full_scan);
1247     else
1248       directory()->CheckTreeInvariants(this, originals_);
1249   }
1250 
1251   UnlockAndLog(originals_);
1252 }
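// Illustrative write path (a sketch; |dir| and |id| are assumed to exist):
//
//   {
//     WriteTransaction trans(dir, syncable::SYNCAPI, __FILE__, __LINE__);
//     MutableEntry entry(&trans, GET_BY_ID, id);
//     if (entry.good())
//       entry.PutIsDel(true);  // updates the deletion-sensitive indices
//   }  // the destructor checks tree invariants, then calls UnlockAndLog()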
1253 
1254 ///////////////////////////////////////////////////////////////////////////
1255 // Entry
1256 
1257 Entry::Entry(BaseTransaction* trans, GetById, const Id& id)
1258     : basetrans_(trans) {
1259   kernel_ = trans->directory()->GetEntryById(id);
1260 }
1261 
1262 Entry::Entry(BaseTransaction* trans, GetByClientTag, const string& tag)
1263     : basetrans_(trans) {
1264   kernel_ = trans->directory()->GetEntryByClientTag(tag);
1265 }
1266 
1267 Entry::Entry(BaseTransaction* trans, GetByServerTag, const string& tag)
1268     : basetrans_(trans) {
1269   kernel_ = trans->directory()->GetEntryByServerTag(tag);
1270 }
1271 
1272 Entry::Entry(BaseTransaction* trans, GetByHandle, int64 metahandle)
1273     : basetrans_(trans) {
1274   kernel_ = trans->directory()->GetEntryByHandle(metahandle);
1275 }
1276 
1277 Directory* Entry::dir() const {
1278   return basetrans_->directory();
1279 }
1280 
1281 Id Entry::ComputePrevIdFromServerPosition(const Id& parent_id) const {
1282   return dir()->ComputePrevIdFromServerPosition(kernel_, parent_id);
1283 }
1284 
1285 DictionaryValue* Entry::ToValue() const {
1286   DictionaryValue* entry_info = new DictionaryValue();
1287   entry_info->SetBoolean("good", good());
1288   if (good()) {
1289     entry_info->Set("kernel", kernel_->ToValue());
1290     entry_info->Set("serverModelType",
1291                     ModelTypeToValue(GetServerModelTypeHelper()));
1292     entry_info->Set("modelType",
1293                     ModelTypeToValue(GetModelType()));
1294     entry_info->SetBoolean("shouldMaintainPosition",
1295                            ShouldMaintainPosition());
1296     entry_info->SetBoolean("existsOnClientBecauseNameIsNonEmpty",
1297                            ExistsOnClientBecauseNameIsNonEmpty());
1298     entry_info->SetBoolean("isRoot", IsRoot());
1299   }
1300   return entry_info;
1301 }
1302 
1303 const string& Entry::Get(StringField field) const {
1304   DCHECK(kernel_);
1305   return kernel_->ref(field);
1306 }
1307 
1308 syncable::ModelType Entry::GetServerModelType() const {
1309   ModelType specifics_type = GetServerModelTypeHelper();
1310   if (specifics_type != UNSPECIFIED)
1311     return specifics_type;
1312 
1313   // Otherwise, we don't have a server type yet.  That should only happen
1314   // if the item is an uncommitted locally created item.
1315   // It's possible we'll need to relax these checks in the future; they're
1316   // just here for now as a safety measure.
1317   DCHECK(Get(IS_UNSYNCED));
1318   DCHECK_EQ(Get(SERVER_VERSION), 0);
1319   DCHECK(Get(SERVER_IS_DEL));
1320   // Note: can't enforce !Get(ID).ServerKnows() here because that could
1321   // actually happen if we hit AttemptReuniteLostCommitResponses.
1322   return UNSPECIFIED;
1323 }
1324 
1325 syncable::ModelType Entry::GetServerModelTypeHelper() const {
1326   ModelType specifics_type = GetModelTypeFromSpecifics(Get(SERVER_SPECIFICS));
1327   if (specifics_type != UNSPECIFIED)
1328     return specifics_type;
1329   if (IsRoot())
1330     return TOP_LEVEL_FOLDER;
1331   // Loose check for server-created top-level folders that aren't
1332   // bound to a particular model type.
1333   if (!Get(UNIQUE_SERVER_TAG).empty() && Get(SERVER_IS_DIR))
1334     return TOP_LEVEL_FOLDER;
1335 
1336   return UNSPECIFIED;
1337 }
1338 
1339 syncable::ModelType Entry::GetModelType() const {
1340   ModelType specifics_type = GetModelTypeFromSpecifics(Get(SPECIFICS));
1341   if (specifics_type != UNSPECIFIED)
1342     return specifics_type;
1343   if (IsRoot())
1344     return TOP_LEVEL_FOLDER;
1345   // Loose check for server-created top-level folders that aren't
1346   // bound to a particular model type.
1347   if (!Get(UNIQUE_SERVER_TAG).empty() && Get(IS_DIR))
1348     return TOP_LEVEL_FOLDER;
1349 
1350   return UNSPECIFIED;
1351 }
1352 
1353 ///////////////////////////////////////////////////////////////////////////
1354 // MutableEntry
1355 
1356 MutableEntry::MutableEntry(WriteTransaction* trans, Create,
1357                            const Id& parent_id, const string& name)
1358     : Entry(trans),
1359       write_transaction_(trans) {
1360   Init(trans, parent_id, name);
1361 }
1362 
1363 
1364 void MutableEntry::Init(WriteTransaction* trans, const Id& parent_id,
1365                         const string& name) {
1366   kernel_ = new EntryKernel;
1367   ZeroFields(kernel_, BEGIN_FIELDS);
1368   kernel_->put(ID, trans->directory_->NextId());
1369   kernel_->put(META_HANDLE, trans->directory_->NextMetahandle());
1370   kernel_->mark_dirty(trans->directory_->kernel_->dirty_metahandles);
1371   kernel_->put(PARENT_ID, parent_id);
1372   kernel_->put(NON_UNIQUE_NAME, name);
1373   const int64 now = Now();
1374   kernel_->put(CTIME, now);
1375   kernel_->put(MTIME, now);
1376   // We match the database defaults here
1377   kernel_->put(BASE_VERSION, CHANGES_VERSION);
1378   trans->directory()->InsertEntry(kernel_);
1379   // Because this entry is new, it was originally deleted.
1380   kernel_->put(IS_DEL, true);
1381   trans->SaveOriginal(kernel_);
1382   kernel_->put(IS_DEL, false);
1383 }
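
// Example usage (illustrative sketch; assumes |trans| is an open
// WriteTransaction and |parent_id| names an existing folder):
//   MutableEntry entry(trans, CREATE, parent_id, "New Bookmark");
//   CHECK(entry.good());
//   entry.Put(IS_UNSYNCED, true);  // Queue the new item for commit.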

MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
                           const Id& id)
    : Entry(trans), write_transaction_(trans) {
  Entry same_id(trans, GET_BY_ID, id);
  if (same_id.good()) {
    kernel_ = NULL;  // We already have an item with this ID.
    return;
  }
  kernel_ = new EntryKernel;
  ZeroFields(kernel_, BEGIN_FIELDS);
  kernel_->put(ID, id);
  kernel_->put(META_HANDLE, trans->directory_->NextMetahandle());
  kernel_->mark_dirty(trans->directory_->kernel_->dirty_metahandles);
  kernel_->put(IS_DEL, true);
  // We match the database defaults here.
  kernel_->put(BASE_VERSION, CHANGES_VERSION);
  trans->directory()->InsertEntry(kernel_);
  trans->SaveOriginal(kernel_);
}
1404 
MutableEntry(WriteTransaction * trans,GetById,const Id & id)1405 MutableEntry::MutableEntry(WriteTransaction* trans, GetById, const Id& id)
1406     : Entry(trans, GET_BY_ID, id), write_transaction_(trans) {
1407   trans->SaveOriginal(kernel_);
1408 }
1409 
MutableEntry(WriteTransaction * trans,GetByHandle,int64 metahandle)1410 MutableEntry::MutableEntry(WriteTransaction* trans, GetByHandle,
1411                            int64 metahandle)
1412     : Entry(trans, GET_BY_HANDLE, metahandle), write_transaction_(trans) {
1413   trans->SaveOriginal(kernel_);
1414 }
1415 
MutableEntry(WriteTransaction * trans,GetByClientTag,const std::string & tag)1416 MutableEntry::MutableEntry(WriteTransaction* trans, GetByClientTag,
1417                            const std::string& tag)
1418     : Entry(trans, GET_BY_CLIENT_TAG, tag), write_transaction_(trans) {
1419   trans->SaveOriginal(kernel_);
1420 }
1421 
MutableEntry(WriteTransaction * trans,GetByServerTag,const string & tag)1422 MutableEntry::MutableEntry(WriteTransaction* trans, GetByServerTag,
1423                            const string& tag)
1424     : Entry(trans, GET_BY_SERVER_TAG, tag), write_transaction_(trans) {
1425   trans->SaveOriginal(kernel_);
1426 }
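
// Example lookup for mutation (illustrative; the tag value is hypothetical):
//   MutableEntry entry(trans, GET_BY_CLIENT_TAG, "bookmark_tag");
//   if (entry.good())
//     entry.Put(NON_UNIQUE_NAME, "Renamed");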

bool MutableEntry::PutIsDel(bool is_del) {
  DCHECK(kernel_);
  if (is_del == kernel_->ref(IS_DEL)) {
    return true;
  }
  if (is_del)
    UnlinkFromOrder();

  {
    ScopedKernelLock lock(dir());
    // Some indices don't include deleted items and must be updated
    // upon a value change.
    ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_,
        dir()->kernel_->parent_id_child_index);

    kernel_->put(IS_DEL, is_del);
    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
  }

  if (!is_del)
    // A null predecessor Id restores the entry to the head of its parent's
    // sibling order.
    PutPredecessor(Id());

  return true;
}

bool MutableEntry::Put(Int64Field field, const int64& value) {
  DCHECK(kernel_);
  if (kernel_->ref(field) != value) {
    ScopedKernelLock lock(dir());
    if (SERVER_POSITION_IN_PARENT == field) {
      // The parent-id/child index is keyed on this field, so it must be
      // removed and re-added around the value change.
      ScopedIndexUpdater<ParentIdAndHandleIndexer> updater(lock, kernel_,
          dir()->kernel_->parent_id_child_index);
      kernel_->put(field, value);
    } else {
      kernel_->put(field, value);
    }
    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
  }
  return true;
}

bool MutableEntry::Put(IdField field, const Id& value) {
  DCHECK(kernel_);
  if (kernel_->ref(field) != value) {
    if (ID == field) {
      if (!dir()->ReindexId(kernel_, value))
        return false;
    } else if (PARENT_ID == field) {
      PutParentIdPropertyOnly(value);  // Makes sibling order inconsistent.
      PutPredecessor(Id());  // Fixes up the sibling order inconsistency.
    } else {
      kernel_->put(field, value);
    }
    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
  }
  return true;
}

void MutableEntry::PutParentIdPropertyOnly(const Id& parent_id) {
  dir()->ReindexParentId(kernel_, parent_id);
  kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
}

bool MutableEntry::Put(BaseVersion field, int64 value) {
  DCHECK(kernel_);
  if (kernel_->ref(field) != value) {
    kernel_->put(field, value);
    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
  }
  return true;
}

bool MutableEntry::Put(StringField field, const string& value) {
  return PutImpl(field, value);
}

bool MutableEntry::Put(ProtoField field,
                       const sync_pb::EntitySpecifics& value) {
  DCHECK(kernel_);
  // TODO(ncarter): This comparison by full serialization is unfortunately
  // heavyweight.  Can we do better?
  if (kernel_->ref(field).SerializeAsString() != value.SerializeAsString()) {
    kernel_->put(field, value);
    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
  }
  return true;
}
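
// One possible cheap early-out (a sketch, not what the code above does):
// protobufs whose ByteSize() values differ cannot serialize identically,
// so the full serialization could be skipped when the sizes differ:
//   if (kernel_->ref(field).ByteSize() != value.ByteSize() ||
//       kernel_->ref(field).SerializeAsString() !=
//           value.SerializeAsString()) {
//     ...  // Values differ; store the new one.
//   }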

bool MutableEntry::Put(BitField field, bool value) {
  DCHECK(kernel_);
  if (kernel_->ref(field) != value) {
    kernel_->put(field, value);
    kernel_->mark_dirty(GetDirtyIndexHelper());
  }
  return true;
}

MetahandleSet* MutableEntry::GetDirtyIndexHelper() {
  return dir()->kernel_->dirty_metahandles;
}

bool MutableEntry::PutUniqueClientTag(const string& new_tag) {
  // There is no SERVER_UNIQUE_CLIENT_TAG.  This field is similar to ID.
  string old_tag = kernel_->ref(UNIQUE_CLIENT_TAG);
  if (old_tag == new_tag) {
    return true;
  }

  ScopedKernelLock lock(dir());
  if (!new_tag.empty()) {
    // Make sure the new tag is not already in use by another entry.
    EntryKernel lookup_kernel_ = *kernel_;
    lookup_kernel_.put(UNIQUE_CLIENT_TAG, new_tag);
    bool new_tag_conflicts =
        (dir()->kernel_->client_tag_index->count(&lookup_kernel_) > 0);
    if (new_tag_conflicts) {
      return false;
    }
  }

  {
    ScopedIndexUpdater<ClientTagIndexer> index_updater(lock, kernel_,
        dir()->kernel_->client_tag_index);
    kernel_->put(UNIQUE_CLIENT_TAG, new_tag);
    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
  }
  return true;
}

bool MutableEntry::PutImpl(StringField field, const string& value) {
  DCHECK(kernel_);
  if (field == UNIQUE_CLIENT_TAG) {
    return PutUniqueClientTag(value);
  }

  if (kernel_->ref(field) != value) {
    kernel_->put(field, value);
    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
  }
  return true;
}

bool MutableEntry::Put(IndexedBitField field, bool value) {
  DCHECK(kernel_);
  if (kernel_->ref(field) != value) {
    MetahandleSet* index;
    if (IS_UNSYNCED == field)
      index = dir()->kernel_->unsynced_metahandles;
    else
      index = dir()->kernel_->unapplied_update_metahandles;

    ScopedKernelLock lock(dir());
    if (value)
      CHECK(index->insert(kernel_->ref(META_HANDLE)).second);
    else
      CHECK_EQ(1U, index->erase(kernel_->ref(META_HANDLE)));
    kernel_->put(field, value);
    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
  }
  return true;
}

void MutableEntry::UnlinkFromOrder() {
  ScopedKernelLock lock(dir());
  dir()->UnlinkEntryFromOrder(kernel_, write_transaction(), &lock);
}

void Directory::UnlinkEntryFromOrder(EntryKernel* entry,
                                     WriteTransaction* trans,
                                     ScopedKernelLock* lock) {
  CHECK(!trans || this == trans->directory());
  Id old_previous = entry->ref(PREV_ID);
  Id old_next = entry->ref(NEXT_ID);

  entry->put(NEXT_ID, entry->ref(ID));
  entry->put(PREV_ID, entry->ref(ID));
  entry->mark_dirty(kernel_->dirty_metahandles);

  if (!old_previous.IsRoot()) {
    if (old_previous == old_next) {
      // Note that previous == next does not imply
      // previous == next == Get(ID).  We could have prev == next == "c-XX"
      // and Get(ID) == "sX..." if an item was added and deleted before
      // receiving the server ID in the commit response.
      CHECK((old_next == entry->ref(ID)) || !old_next.ServerKnows());
      return;  // Done if we were already self-looped (hence unlinked).
    }
    EntryKernel* previous_entry = GetEntryById(old_previous, lock);
    CHECK(previous_entry);
    if (trans)
      trans->SaveOriginal(previous_entry);
    previous_entry->put(NEXT_ID, old_next);
    previous_entry->mark_dirty(kernel_->dirty_metahandles);
  }

  if (!old_next.IsRoot()) {
    EntryKernel* next_entry = GetEntryById(old_next, lock);
    CHECK(next_entry);
    if (trans)
      trans->SaveOriginal(next_entry);
    next_entry->put(PREV_ID, old_previous);
    next_entry->mark_dirty(kernel_->dirty_metahandles);
  }
}
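
// Illustrative before/after: unlinking B from the sibling chain
// A <-> B <-> C splices the chain to A <-> C, while B's PREV_ID and
// NEXT_ID both point at B itself.  That self-loop is the convention,
// used throughout this file, for "not currently in the child ordering."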

bool MutableEntry::PutPredecessor(const Id& predecessor_id) {
  UnlinkFromOrder();

  if (Get(IS_DEL)) {
    DCHECK(predecessor_id.IsNull());
    return true;
  }

  // TODO(ncarter): It should be possible to not maintain position for
  // non-bookmark items.  However, we'd need to robustly handle all possible
  // permutations of setting IS_DEL and the SPECIFICS to identify the
  // object type; or else, we'd need to add a ModelType to the
  // MutableEntry's Create ctor.
  //   if (!ShouldMaintainPosition()) {
  //     return false;
  //   }

  // This is classic insert-into-doubly-linked-list from CS 101 and your last
  // job interview.  An "IsRoot" Id signifies the head or tail.
  Id successor_id;
  if (!predecessor_id.IsRoot()) {
    MutableEntry predecessor(write_transaction(), GET_BY_ID, predecessor_id);
    CHECK(predecessor.good());
    if (predecessor.Get(PARENT_ID) != Get(PARENT_ID))
      return false;
    successor_id = predecessor.Get(NEXT_ID);
    predecessor.Put(NEXT_ID, Get(ID));
  } else {
    syncable::Directory* dir = trans()->directory();
    successor_id = dir->GetFirstChildId(trans(), Get(PARENT_ID));
  }
  if (!successor_id.IsRoot()) {
    MutableEntry successor(write_transaction(), GET_BY_ID, successor_id);
    CHECK(successor.good());
    if (successor.Get(PARENT_ID) != Get(PARENT_ID))
      return false;
    successor.Put(PREV_ID, Get(ID));
  }
  DCHECK(predecessor_id != Get(ID));
  DCHECK(successor_id != Get(ID));
  Put(PREV_ID, predecessor_id);
  Put(NEXT_ID, successor_id);
  return true;
}
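
// Worked example (illustrative): with siblings P <-> S under one parent,
// calling PutPredecessor(P.Get(ID)) on entry X yields P <-> X <-> S; a
// root (null) |predecessor_id| instead inserts X at the head of the list.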

bool MutableEntry::Put(BitTemp field, bool value) {
  DCHECK(kernel_);
  // Temporary bits are in-memory only, so there is no need to mark the
  // entry dirty.
  kernel_->put(field, value);
  return true;
}

///////////////////////////////////////////////////////////////////////////
// High-level functions

int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}

// Always returns a client ID that is the string representation of a negative
// number.
Id Directory::NextId() {
  int64 result;
  {
    ScopedKernelLock lock(this);
    result = (kernel_->persisted_info.next_id)--;
    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  }
  DCHECK_LT(result, 0);
  return Id::CreateFromClientString(base::Int64ToString(result));
}
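
// For illustration: if persisted_info.next_id is -5, the entry gets the
// client ID built from the string "-5" (client IDs render in the "c-XX"
// form mentioned above), and the counter moves on to -6.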

Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const Id& parent_id) {
  ScopedKernelLock lock(this);
  // We can use the server positional ordering as a hint because it's
  // generally in sync with the local (linked-list) positional ordering,
  // and we have an index on it.
  ParentIdChildIndex::iterator candidate =
      GetParentChildIndexLowerBound(lock, parent_id);
  ParentIdChildIndex::iterator end_range =
      GetParentChildIndexUpperBound(lock, parent_id);
  for (; candidate != end_range; ++candidate) {
    EntryKernel* entry = *candidate;
    // Filter out self-looped items, which are temporarily not in the child
    // ordering.
    if (entry->ref(PREV_ID).IsRoot() ||
        entry->ref(PREV_ID) != entry->ref(NEXT_ID)) {
      // Walk to the front of the list; the server position ordering
      // is commonly identical to the linked-list ordering, but pending
      // unsynced or unapplied items may diverge.
      while (!entry->ref(PREV_ID).IsRoot()) {
        entry = GetEntryById(entry->ref(PREV_ID), &lock);
      }
      return entry->ref(ID);
    }
  }
  // There were no children in the linked list.
  return Id();
}
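
// Illustrative: if the index orders |parent_id|'s children {B, A, C} by
// server position while the local linked list is A <-> B <-> C, the walk
// backward from the first non-self-looped candidate still ends at A, the
// true head of the local ordering.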

Id Directory::GetLastChildId(BaseTransaction* trans,
                             const Id& parent_id) {
  ScopedKernelLock lock(this);
  // We can use the server positional ordering as a hint because it's
  // generally in sync with the local (linked-list) positional ordering,
  // and we have an index on it.
  ParentIdChildIndex::iterator begin_range =
      GetParentChildIndexLowerBound(lock, parent_id);
  ParentIdChildIndex::iterator candidate =
      GetParentChildIndexUpperBound(lock, parent_id);

  while (begin_range != candidate) {
    --candidate;
    EntryKernel* entry = *candidate;

    // Filter out self-looped items, which are temporarily not in the child
    // ordering.
    if (entry->ref(NEXT_ID).IsRoot() ||
        entry->ref(NEXT_ID) != entry->ref(PREV_ID)) {
      // Walk to the back of the list; the server position ordering
      // is commonly identical to the linked-list ordering, but pending
      // unsynced or unapplied items may diverge.
      while (!entry->ref(NEXT_ID).IsRoot())
        entry = GetEntryById(entry->ref(NEXT_ID), &lock);
      return entry->ref(ID);
    }
  }
  // There were no children in the linked list.
  return Id();
}

Id Directory::ComputePrevIdFromServerPosition(
    const EntryKernel* entry,
    const syncable::Id& parent_id) {
  ScopedKernelLock lock(this);

  // Find the natural insertion point in the parent_id_child_index, and
  // work back from there, filtering out ineligible candidates.
  ParentIdChildIndex::iterator sibling = LocateInParentChildIndex(lock,
      parent_id, entry->ref(SERVER_POSITION_IN_PARENT), entry->ref(ID));
  ParentIdChildIndex::iterator first_sibling =
      GetParentChildIndexLowerBound(lock, parent_id);

  while (sibling != first_sibling) {
    --sibling;
    EntryKernel* candidate = *sibling;

    // The item itself should never be in the range under consideration.
    DCHECK_NE(candidate->ref(META_HANDLE), entry->ref(META_HANDLE));

    // Ignore unapplied updates -- they might not even be server-siblings.
    if (candidate->ref(IS_UNAPPLIED_UPDATE))
      continue;

    // We can't trust the SERVER_ fields of unsynced items, but they are
    // potentially legitimate local predecessors.  In the case where |entry|
    // and an unsynced item wind up in the same insertion position, we need
    // to choose how to order them.  The following check puts |entry| first;
    // removing it would put the unsynced item(s) first.
    if (candidate->ref(IS_UNSYNCED))
      continue;

    // Skip over self-looped items, which are not valid predecessors.  This
    // shouldn't happen in practice, but is worth defending against.
    if (candidate->ref(PREV_ID) == candidate->ref(NEXT_ID) &&
        !candidate->ref(PREV_ID).IsRoot()) {
      NOTREACHED();
      continue;
    }
    return candidate->ref(ID);
  }
  // This item will be the first in the sibling order.
  return Id();
}
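
// Illustrative: with candidate siblings [P (committed), U (unsynced)]
// ordered by server position just before |entry|'s slot, U is skipped and
// P's ID is returned, so |entry| ends up ordered ahead of U.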

bool IsLegalNewParent(BaseTransaction* trans, const Id& entry_id,
                      const Id& new_parent_id) {
  if (entry_id.IsRoot())
    return false;
  // We have to ensure that the entry is not an ancestor of the new parent,
  // which would create a cycle in the hierarchy.
  Id ancestor_id = new_parent_id;
  while (!ancestor_id.IsRoot()) {
    if (entry_id == ancestor_id)
      return false;
    Entry new_parent(trans, GET_BY_ID, ancestor_id);
    CHECK(new_parent.good());
    ancestor_id = new_parent.Get(PARENT_ID);
  }
  return true;
}
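
// Example (illustrative): given the hierarchy root -> A -> B, reparenting
// A under B is illegal because A is an ancestor of B; reparenting B
// directly under root is fine.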

// This function sets only the flags needed to get this entry to sync.
void MarkForSyncing(syncable::MutableEntry* e) {
  DCHECK_NE(static_cast<MutableEntry*>(NULL), e);
  DCHECK(!e->IsRoot()) << "We shouldn't mark a permanent object for syncing.";
  e->Put(IS_UNSYNCED, true);
  e->Put(SYNCING, false);
}

std::ostream& operator<<(std::ostream& os, const Entry& entry) {
  int i;
  EntryKernel* const kernel = entry.kernel_;
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    os << g_metas_columns[i].name << ": "
       << kernel->ref(static_cast<Int64Field>(i)) << ", ";
  }
  for ( ; i < ID_FIELDS_END; ++i) {
    os << g_metas_columns[i].name << ": "
       << kernel->ref(static_cast<IdField>(i)) << ", ";
  }
  os << "Flags: ";
  for ( ; i < BIT_FIELDS_END; ++i) {
    if (kernel->ref(static_cast<BitField>(i)))
      os << g_metas_columns[i].name << ", ";
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    const string& field = kernel->ref(static_cast<StringField>(i));
    os << g_metas_columns[i].name << ": " << field << ", ";
  }
  for ( ; i < PROTO_FIELDS_END; ++i) {
    os << g_metas_columns[i].name << ": "
       << EscapePath(
           kernel->ref(static_cast<ProtoField>(i)).SerializeAsString())
       << ", ";
  }
  os << "TempFlags: ";
  for ( ; i < BIT_TEMPS_END; ++i) {
    if (kernel->ref(static_cast<BitTemp>(i)))
      os << "#" << i - BIT_TEMPS_BEGIN << ", ";
  }
  return os;
}

std::ostream& operator<<(std::ostream& s, const Blob& blob) {
  // Print each byte as two zero-padded hex digits, then restore decimal
  // formatting on the stream.
  for (Blob::const_iterator i = blob.begin(); i != blob.end(); ++i)
    s << std::hex << std::setw(2)
      << std::setfill('0') << static_cast<unsigned int>(*i);
  return s << std::dec;
}
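
// For example, a Blob containing the bytes {0x0a, 0xff} prints as "0aff".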

Directory::ParentIdChildIndex::iterator Directory::LocateInParentChildIndex(
    const ScopedKernelLock& lock,
    const Id& parent_id,
    int64 position_in_parent,
    const Id& item_id_for_tiebreaking) {
  kernel_->needle.put(PARENT_ID, parent_id);
  kernel_->needle.put(SERVER_POSITION_IN_PARENT, position_in_parent);
  kernel_->needle.put(ID, item_id_for_tiebreaking);
  return kernel_->parent_id_child_index->lower_bound(&kernel_->needle);
}

Directory::ParentIdChildIndex::iterator
Directory::GetParentChildIndexLowerBound(const ScopedKernelLock& lock,
                                         const Id& parent_id) {
  // Peg the parent ID, and use the least values for the remaining
  // index variables.
  return LocateInParentChildIndex(lock, parent_id,
      std::numeric_limits<int64>::min(),
      Id::GetLeastIdForLexicographicComparison());
}

Directory::ParentIdChildIndex::iterator
Directory::GetParentChildIndexUpperBound(const ScopedKernelLock& lock,
                                         const Id& parent_id) {
  // The upper bound of |parent_id|'s range is the lower
  // bound of |++parent_id|'s range.
  return GetParentChildIndexLowerBound(lock,
      parent_id.GetLexicographicSuccessor());
}

}  // namespace syncable