1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "sync/syncable/directory.h"
6
7 #include <algorithm>
8 #include <iterator>
9
10 #include "base/base64.h"
11 #include "base/debug/trace_event.h"
12 #include "base/stl_util.h"
13 #include "base/strings/string_number_conversions.h"
14 #include "sync/internal_api/public/base/attachment_id_proto.h"
15 #include "sync/internal_api/public/base/unique_position.h"
16 #include "sync/internal_api/public/util/unrecoverable_error_handler.h"
17 #include "sync/syncable/entry.h"
18 #include "sync/syncable/entry_kernel.h"
19 #include "sync/syncable/in_memory_directory_backing_store.h"
20 #include "sync/syncable/on_disk_directory_backing_store.h"
21 #include "sync/syncable/scoped_kernel_lock.h"
22 #include "sync/syncable/scoped_parent_child_index_updater.h"
23 #include "sync/syncable/syncable-inl.h"
24 #include "sync/syncable/syncable_base_transaction.h"
25 #include "sync/syncable/syncable_changes_version.h"
26 #include "sync/syncable/syncable_read_transaction.h"
27 #include "sync/syncable/syncable_util.h"
28 #include "sync/syncable/syncable_write_transaction.h"
29
30 using std::string;
31
32 namespace syncer {
33 namespace syncable {
34
35 // static
36 const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
37 FILE_PATH_LITERAL("SyncData.sqlite3");
38
39 Directory::PersistedKernelInfo::PersistedKernelInfo()
40 : next_id(0) {
41 ModelTypeSet protocol_types = ProtocolTypes();
42 for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
43 iter.Inc()) {
44 ResetDownloadProgress(iter.Get());
45 transaction_version[iter.Get()] = 0;
46 }
47 }
48
49 Directory::PersistedKernelInfo::~PersistedKernelInfo() {}
50
51 void Directory::PersistedKernelInfo::ResetDownloadProgress(
52 ModelType model_type) {
53 // Clear everything except the data type id field.
54 download_progress[model_type].Clear();
55 download_progress[model_type].set_data_type_id(
56 GetSpecificsFieldNumberFromModelType(model_type));
57
58 // Explicitly set an empty token field to denote no progress.
59 download_progress[model_type].set_token("");
60 }
61
62 bool Directory::PersistedKernelInfo::HasEmptyDownloadProgress(
63 ModelType model_type) {
64 const sync_pb::DataTypeProgressMarker& progress_marker =
65 download_progress[model_type];
66 return progress_marker.token().empty();
67 }
68
69 Directory::SaveChangesSnapshot::SaveChangesSnapshot()
70 : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
71 }
72
73 Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
74 STLDeleteElements(&dirty_metas);
75 STLDeleteElements(&delete_journals);
76 }
77
78 Directory::Kernel::Kernel(
79 const std::string& name,
80 const KernelLoadInfo& info, DirectoryChangeDelegate* delegate,
81 const WeakHandle<TransactionObserver>& transaction_observer)
82 : next_write_transaction_id(0),
83 name(name),
84 info_status(Directory::KERNEL_SHARE_INFO_VALID),
85 persisted_info(info.kernel_info),
86 cache_guid(info.cache_guid),
87 next_metahandle(info.max_metahandle + 1),
88 delegate(delegate),
89 transaction_observer(transaction_observer) {
90 DCHECK(delegate);
91 DCHECK(transaction_observer.IsInitialized());
92 }
93
94 Directory::Kernel::~Kernel() {
95 STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
96 metahandles_map.end());
97 }
98
99 Directory::Directory(
100 DirectoryBackingStore* store,
101 UnrecoverableErrorHandler* unrecoverable_error_handler,
102 ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
103 NigoriHandler* nigori_handler,
104 Cryptographer* cryptographer)
105 : kernel_(NULL),
106 store_(store),
107 unrecoverable_error_handler_(unrecoverable_error_handler),
108 report_unrecoverable_error_function_(
109 report_unrecoverable_error_function),
110 unrecoverable_error_set_(false),
111 nigori_handler_(nigori_handler),
112 cryptographer_(cryptographer),
113 invariant_check_level_(VERIFY_CHANGES) {
114 }
115
116 Directory::~Directory() {
117 Close();
118 }
119
120 DirOpenResult Directory::Open(
121 const string& name,
122 DirectoryChangeDelegate* delegate,
123 const WeakHandle<TransactionObserver>& transaction_observer) {
124 TRACE_EVENT0("sync", "SyncDatabaseOpen");
125
126 const DirOpenResult result =
127 OpenImpl(name, delegate, transaction_observer);
128
129 if (OPENED != result)
130 Close();
131 return result;
132 }
133
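// Rebuilds every in-memory index (parent/child, unsynced, unapplied-update,
// server tag, client tag, ID, and attachment indices) from the metahandles
// map that was just loaded from the backing store.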
134 void Directory::InitializeIndices(MetahandlesMap* handles_map) {
135 ScopedKernelLock lock(this);
136 kernel_->metahandles_map.swap(*handles_map);
137 for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
138 it != kernel_->metahandles_map.end(); ++it) {
139 EntryKernel* entry = it->second;
140 if (ParentChildIndex::ShouldInclude(entry))
141 kernel_->parent_child_index.Insert(entry);
142 const int64 metahandle = entry->ref(META_HANDLE);
143 if (entry->ref(IS_UNSYNCED))
144 kernel_->unsynced_metahandles.insert(metahandle);
145 if (entry->ref(IS_UNAPPLIED_UPDATE)) {
146 const ModelType type = entry->GetServerModelType();
147 kernel_->unapplied_update_metahandles[type].insert(metahandle);
148 }
149 if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
150 DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
151 kernel_->server_tags_map.end())
152 << "Unexpected duplicate use of server tag";
153 kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
154 }
155 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
156 DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
157 kernel_->client_tags_map.end())
158 << "Unexpected duplicate use of client tag";
159 kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
160 }
161 DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
162 kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
163 kernel_->ids_map[entry->ref(ID).value()] = entry;
164 DCHECK(!entry->is_dirty());
165 AddToAttachmentIndex(lock, metahandle, entry->ref(ATTACHMENT_METADATA));
166 }
167 }
168
169 DirOpenResult Directory::OpenImpl(
170 const string& name,
171 DirectoryChangeDelegate* delegate,
172 const WeakHandle<TransactionObserver>&
173 transaction_observer) {
174 KernelLoadInfo info;
175 // Temporary indices, used before kernel_ is initialized in case Load fails.
176 // We swap these into place in O(1) later.
177 Directory::MetahandlesMap tmp_handles_map;
178
179 // Avoids mem leaks on failure. Harmlessly deletes the empty hash map after
180 // the swap in the success case.
181 STLValueDeleter<Directory::MetahandlesMap> deleter(&tmp_handles_map);
182
183 JournalIndex delete_journals;
184
185 DirOpenResult result =
186 store_->Load(&tmp_handles_map, &delete_journals, &info);
187 if (OPENED != result)
188 return result;
189
190 kernel_ = new Kernel(name, info, delegate, transaction_observer);
191 delete_journal_.reset(new DeleteJournal(&delete_journals));
192 InitializeIndices(&tmp_handles_map);
193
194 // Write back the share info to reserve some space in 'next_id'. This will
195 // prevent local ID reuse in the case of an early crash. See the comments in
196 // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
197 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
198 if (!SaveChanges())
199 return FAILED_INITIAL_WRITE;
200
201 return OPENED;
202 }
203
204 DeleteJournal* Directory::delete_journal() {
205 DCHECK(delete_journal_.get());
206 return delete_journal_.get();
207 }
208
209 void Directory::Close() {
210 store_.reset();
211 if (kernel_) {
212 delete kernel_;
213 kernel_ = NULL;
214 }
215 }
216
217 void Directory::OnUnrecoverableError(const BaseTransaction* trans,
218 const tracked_objects::Location& location,
219 const std::string& message) {
220 DCHECK(trans != NULL);
221 unrecoverable_error_set_ = true;
222 unrecoverable_error_handler_->OnUnrecoverableError(location,
223 message);
224 }
225
226 EntryKernel* Directory::GetEntryById(const Id& id) {
227 ScopedKernelLock lock(this);
228 return GetEntryById(lock, id);
229 }
230
231 EntryKernel* Directory::GetEntryById(const ScopedKernelLock& lock,
232 const Id& id) {
233 DCHECK(kernel_);
234 // Find it in the in memory ID index.
235 IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
236 if (id_found != kernel_->ids_map.end()) {
237 return id_found->second;
238 }
239 return NULL;
240 }
241
242 EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
243 ScopedKernelLock lock(this);
244 DCHECK(kernel_);
245
246 TagsMap::iterator it = kernel_->client_tags_map.find(tag);
247 if (it != kernel_->client_tags_map.end()) {
248 return it->second;
249 }
250 return NULL;
251 }
252
253 EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
254 ScopedKernelLock lock(this);
255 DCHECK(kernel_);
256 TagsMap::iterator it = kernel_->server_tags_map.find(tag);
257 if (it != kernel_->server_tags_map.end()) {
258 return it->second;
259 }
260 return NULL;
261 }
262
263 EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
264 ScopedKernelLock lock(this);
265 return GetEntryByHandle(lock, metahandle);
266 }
267
268 EntryKernel* Directory::GetEntryByHandle(const ScopedKernelLock& lock,
269 int64 metahandle) {
270 // Look up in memory
271 MetahandlesMap::iterator found =
272 kernel_->metahandles_map.find(metahandle);
273 if (found != kernel_->metahandles_map.end()) {
274 // Found it in memory. Easy.
275 return found->second;
276 }
277 return NULL;
278 }
279
280 bool Directory::GetChildHandlesById(
281 BaseTransaction* trans, const Id& parent_id,
282 Directory::Metahandles* result) {
283 if (!SyncAssert(this == trans->directory(), FROM_HERE,
284 "Directories don't match", trans))
285 return false;
286 result->clear();
287
288 ScopedKernelLock lock(this);
289 AppendChildHandles(lock, parent_id, result);
290 return true;
291 }
292
293 int Directory::GetTotalNodeCount(
294 BaseTransaction* trans,
295 EntryKernel* kernel) const {
296 if (!SyncAssert(this == trans->directory(), FROM_HERE,
297 "Directories don't match", trans))
298 return 0;
299
300 int count = 1;
301 std::deque<const OrderedChildSet*> child_sets;
302
303 GetChildSetForKernel(trans, kernel, &child_sets);
304 while (!child_sets.empty()) {
305 const OrderedChildSet* set = child_sets.front();
306 child_sets.pop_front();
307 for (OrderedChildSet::const_iterator it = set->begin();
308 it != set->end(); ++it) {
309 count++;
310 GetChildSetForKernel(trans, *it, &child_sets);
311 }
312 }
313
314 return count;
315 }
316
317 void Directory::GetChildSetForKernel(
318 BaseTransaction* trans,
319 EntryKernel* kernel,
320 std::deque<const OrderedChildSet*>* child_sets) const {
321 if (!kernel->ref(IS_DIR))
322 return; // Not a directory => no children.
323
324 const OrderedChildSet* descendants =
325 kernel_->parent_child_index.GetChildren(kernel->ref(ID));
326 if (!descendants)
327 return; // This directory has no children.
328
329 // Add our children to the list of items to be traversed.
330 child_sets->push_back(descendants);
331 }
332
333 int Directory::GetPositionIndex(
334 BaseTransaction* trans,
335 EntryKernel* kernel) const {
336 const OrderedChildSet* siblings =
337 kernel_->parent_child_index.GetChildren(kernel->ref(PARENT_ID));
338
339 OrderedChildSet::const_iterator it = siblings->find(kernel);
340 return std::distance(siblings->begin(), it);
341 }
342
343 bool Directory::InsertEntry(BaseWriteTransaction* trans, EntryKernel* entry) {
344 ScopedKernelLock lock(this);
345 return InsertEntry(lock, trans, entry);
346 }
347
348 bool Directory::InsertEntry(const ScopedKernelLock& lock,
349 BaseWriteTransaction* trans,
350 EntryKernel* entry) {
351 if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
352 return false;
353
354 static const char error[] = "Entry already in memory index.";
355
356 if (!SyncAssert(
357 kernel_->metahandles_map.insert(
358 std::make_pair(entry->ref(META_HANDLE), entry)).second,
359 FROM_HERE,
360 error,
361 trans)) {
362 return false;
363 }
364 if (!SyncAssert(
365 kernel_->ids_map.insert(
366 std::make_pair(entry->ref(ID).value(), entry)).second,
367 FROM_HERE,
368 error,
369 trans)) {
370 return false;
371 }
372 if (ParentChildIndex::ShouldInclude(entry)) {
373 if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
374 FROM_HERE,
375 error,
376 trans)) {
377 return false;
378 }
379 }
380 AddToAttachmentIndex(
381 lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));
382
383 // Should NEVER be created with a client tag or server tag.
384 if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
385 "Server tag should be empty", trans)) {
386 return false;
387 }
388 if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
389 "Client tag should be empty", trans))
390 return false;
391
392 return true;
393 }
394
395 bool Directory::ReindexId(BaseWriteTransaction* trans,
396 EntryKernel* const entry,
397 const Id& new_id) {
398 ScopedKernelLock lock(this);
399 if (NULL != GetEntryById(lock, new_id))
400 return false;
401
402 {
403 // Update the indices that depend on the ID field.
404 ScopedParentChildIndexUpdater updater_b(lock, entry,
405 &kernel_->parent_child_index);
406 size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
407 DCHECK_EQ(1U, num_erased);
408 entry->put(ID, new_id);
409 kernel_->ids_map[entry->ref(ID).value()] = entry;
410 }
411 return true;
412 }
413
414 bool Directory::ReindexParentId(BaseWriteTransaction* trans,
415 EntryKernel* const entry,
416 const Id& new_parent_id) {
417 ScopedKernelLock lock(this);
418
419 {
420 // Update the indices that depend on the PARENT_ID field.
421 ScopedParentChildIndexUpdater index_updater(lock, entry,
422 &kernel_->parent_child_index);
423 entry->put(PARENT_ID, new_parent_id);
424 }
425 return true;
426 }
427
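// The attachment index maps an attachment's unique id to the set of
// metahandles whose ATTACHMENT_METADATA references it. The helpers below keep
// that mapping current as entries gain or lose attachment records.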
428 void Directory::RemoveFromAttachmentIndex(
429 const ScopedKernelLock& lock,
430 const int64 metahandle,
431 const sync_pb::AttachmentMetadata& attachment_metadata) {
432 for (int i = 0; i < attachment_metadata.record_size(); ++i) {
433 AttachmentIdUniqueId unique_id =
434 attachment_metadata.record(i).id().unique_id();
435 IndexByAttachmentId::iterator iter =
436 kernel_->index_by_attachment_id.find(unique_id);
437 if (iter != kernel_->index_by_attachment_id.end()) {
438 iter->second.erase(metahandle);
439 if (iter->second.empty()) {
440 kernel_->index_by_attachment_id.erase(iter);
441 }
442 }
443 }
444 }
445
446 void Directory::AddToAttachmentIndex(
447 const ScopedKernelLock& lock,
448 const int64 metahandle,
449 const sync_pb::AttachmentMetadata& attachment_metadata) {
450 for (int i = 0; i < attachment_metadata.record_size(); ++i) {
451 AttachmentIdUniqueId unique_id =
452 attachment_metadata.record(i).id().unique_id();
453 IndexByAttachmentId::iterator iter =
454 kernel_->index_by_attachment_id.find(unique_id);
455 if (iter == kernel_->index_by_attachment_id.end()) {
456 iter = kernel_->index_by_attachment_id.insert(std::make_pair(
457 unique_id,
458 MetahandleSet())).first;
459 }
460 iter->second.insert(metahandle);
461 }
462 }
463
464 void Directory::UpdateAttachmentIndex(
465 const int64 metahandle,
466 const sync_pb::AttachmentMetadata& old_metadata,
467 const sync_pb::AttachmentMetadata& new_metadata) {
468 ScopedKernelLock lock(this);
469 RemoveFromAttachmentIndex(lock, metahandle, old_metadata);
470 AddToAttachmentIndex(lock, metahandle, new_metadata);
471 }
472
473 void Directory::GetMetahandlesByAttachmentId(
474 BaseTransaction* trans,
475 const sync_pb::AttachmentIdProto& attachment_id_proto,
476 Metahandles* result) {
477 DCHECK(result);
478 result->clear();
479 ScopedKernelLock lock(this);
480 IndexByAttachmentId::const_iterator index_iter =
481 kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
482 if (index_iter == kernel_->index_by_attachment_id.end())
483 return;
484 const MetahandleSet& metahandle_set = index_iter->second;
485 std::copy(
486 metahandle_set.begin(), metahandle_set.end(), back_inserter(*result));
487 }
488
489 bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
490 DCHECK(trans != NULL);
491 return unrecoverable_error_set_;
492 }
493
494 void Directory::ClearDirtyMetahandles(const ScopedKernelLock& lock) {
495 kernel_->transaction_mutex.AssertAcquired();
496 kernel_->dirty_metahandles.clear();
497 }
498
499 bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
500 const EntryKernel* const entry) const {
501 bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
502 !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
503 !entry->ref(IS_UNSYNCED);
504
505 if (safe) {
506 int64 handle = entry->ref(META_HANDLE);
507 const ModelType type = entry->GetServerModelType();
508 if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
509 FROM_HERE,
510 "Dirty metahandles should be empty", trans))
511 return false;
512 // TODO(tim): Bug 49278.
513 if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
514 FROM_HERE,
515 "Unsynced handles should be empty",
516 trans))
517 return false;
518 if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
519 FROM_HERE,
520 "Unapplied metahandles should be empty",
521 trans))
522 return false;
523 }
524
525 return safe;
526 }
527
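// Deep-copies all dirty state into |snapshot| and optimistically clears the
// in-memory dirty bits; HandleSaveChangesFailure() restores them if the
// subsequent write to the backing store fails.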
528 void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
529 ReadTransaction trans(FROM_HERE, this);
530 ScopedKernelLock lock(this);
531
532 // If there is an unrecoverable error then just bail out.
533 if (unrecoverable_error_set(&trans))
534 return;
535
536 // Deep copy dirty entries from kernel_->metahandles_index into snapshot and
537 // clear dirty flags.
538 for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
539 i != kernel_->dirty_metahandles.end(); ++i) {
540 EntryKernel* entry = GetEntryByHandle(lock, *i);
541 if (!entry)
542 continue;
543 // Skip over false positives; it happens relatively infrequently.
544 if (!entry->is_dirty())
545 continue;
546 snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
547 new EntryKernel(*entry));
548 DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
549 // We don't bother removing from the index here as we blow the entire thing
550 // in a moment, and it unnecessarily complicates iteration.
551 entry->clear_dirty(NULL);
552 }
553 ClearDirtyMetahandles(lock);
554
555 // Set purged handles.
556 DCHECK(snapshot->metahandles_to_purge.empty());
557 snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);
558
559 // Fill kernel_info_status and kernel_info.
560 snapshot->kernel_info = kernel_->persisted_info;
561 // To avoid duplicates when the process crashes, we record a next_id of
562 // greater magnitude than could possibly be reached before the next save
563 // changes. In other words, it's effectively impossible for the user to
564 // generate 65536 new bookmarks in 3 seconds.
565 snapshot->kernel_info.next_id -= 65536;
566 snapshot->kernel_info_status = kernel_->info_status;
567 // This one we reset on failure.
568 kernel_->info_status = KERNEL_SHARE_INFO_VALID;
569
570 delete_journal_->TakeSnapshotAndClear(
571 &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
572 }
573
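// Snapshots dirty state, persists it via the backing store, then either
// vacuums purgeable entries on success or restores the dirty bits on failure
// so the data is retried by a later SaveChanges() call.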
574 bool Directory::SaveChanges() {
575 bool success = false;
576
577 base::AutoLock scoped_lock(kernel_->save_changes_mutex);
578
579 // Snapshot and save.
580 SaveChangesSnapshot snapshot;
581 TakeSnapshotForSaveChanges(&snapshot);
582 success = store_->SaveChanges(snapshot);
583
584 // Handle success or failure.
585 if (success)
586 success = VacuumAfterSaveChanges(snapshot);
587 else
588 HandleSaveChangesFailure(snapshot);
589 return success;
590 }
591
592 bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
593 if (snapshot.dirty_metas.empty())
594 return true;
595
596 // Need a write transaction as we are about to permanently purge entries.
597 WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
598 ScopedKernelLock lock(this);
599 // Now drop everything we can out of memory.
600 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
601 i != snapshot.dirty_metas.end(); ++i) {
602 MetahandlesMap::iterator found =
603 kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
604 EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
605 NULL : found->second);
606 if (entry && SafeToPurgeFromMemory(&trans, entry)) {
607 // We now drop deleted metahandles that are up to date on both the client
608 // and the server.
609 size_t num_erased = 0;
610 num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
611 DCHECK_EQ(1u, num_erased);
612 num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
613 DCHECK_EQ(1u, num_erased);
614 if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
615 num_erased =
616 kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
617 DCHECK_EQ(1u, num_erased);
618 }
619 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
620 num_erased =
621 kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
622 DCHECK_EQ(1u, num_erased);
623 }
624 if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
625 FROM_HERE,
626 "Deleted entry still present",
627 (&trans)))
628 return false;
629 RemoveFromAttachmentIndex(
630 lock, entry->ref(META_HANDLE), entry->ref(ATTACHMENT_METADATA));
631
632 delete entry;
633 }
634 if (trans.unrecoverable_error_set())
635 return false;
636 }
637 return true;
638 }
639
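// Resets |entry| to the state it would have had if it had just arrived as a
// server update: unapplied, not unsynced, locally deleted, and at the "newly
// created" base version. Type root nodes are deliberately left untouched.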
640 void Directory::UnapplyEntry(EntryKernel* entry) {
641 int64 handle = entry->ref(META_HANDLE);
642 ModelType server_type = GetModelTypeFromSpecifics(
643 entry->ref(SERVER_SPECIFICS));
644
645 // Clear enough so that on the next sync cycle all local data will
646 // be overwritten.
647 // Note: do not modify the root node in order to preserve the
648 // initial sync ended bit for this type (else on the next restart
649 // this type will be treated as disabled and therefore fully purged).
650 if (IsRealDataType(server_type) &&
651 ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
652 return;
653 }
654
655 // Set the unapplied bit if this item has server data.
656 if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
657 entry->put(IS_UNAPPLIED_UPDATE, true);
658 kernel_->unapplied_update_metahandles[server_type].insert(handle);
659 entry->mark_dirty(&kernel_->dirty_metahandles);
660 }
661
662 // Unset the unsynced bit.
663 if (entry->ref(IS_UNSYNCED)) {
664 kernel_->unsynced_metahandles.erase(handle);
665 entry->put(IS_UNSYNCED, false);
666 entry->mark_dirty(&kernel_->dirty_metahandles);
667 }
668
669 // Mark the item as locally deleted. No deleted items are allowed in the
670 // parent child index.
671 if (!entry->ref(IS_DEL)) {
672 kernel_->parent_child_index.Remove(entry);
673 entry->put(IS_DEL, true);
674 entry->mark_dirty(&kernel_->dirty_metahandles);
675 }
676
677 // Set the version to the "newly created" version.
678 if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
679 entry->put(BASE_VERSION, CHANGES_VERSION);
680 entry->mark_dirty(&kernel_->dirty_metahandles);
681 }
682
683 // At this point locally created items that aren't synced will become locally
684 // deleted items, and purged on the next snapshot. All other items will match
685 // the state they would have had if they were just created via a server
686 // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
687 }
688
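// Removes |entry| from all in-memory indices and schedules it for purging
// from the backing store. Ownership of the kernel passes to
// |entries_to_journal| when |save_to_journal| is set; otherwise the entry is
// deleted here.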
689 void Directory::DeleteEntry(const ScopedKernelLock& lock,
690 bool save_to_journal,
691 EntryKernel* entry,
692 EntryKernelSet* entries_to_journal) {
693 int64 handle = entry->ref(META_HANDLE);
694 ModelType server_type = GetModelTypeFromSpecifics(
695 entry->ref(SERVER_SPECIFICS));
696
697 kernel_->metahandles_to_purge.insert(handle);
698
699 size_t num_erased = 0;
700 num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
701 DCHECK_EQ(1u, num_erased);
702 num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
703 DCHECK_EQ(1u, num_erased);
704 num_erased = kernel_->unsynced_metahandles.erase(handle);
705 DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
706 num_erased =
707 kernel_->unapplied_update_metahandles[server_type].erase(handle);
708 DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
709 if (kernel_->parent_child_index.Contains(entry))
710 kernel_->parent_child_index.Remove(entry);
711
712 if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
713 num_erased =
714 kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
715 DCHECK_EQ(1u, num_erased);
716 }
717 if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
718 num_erased =
719 kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
720 DCHECK_EQ(1u, num_erased);
721 }
722 RemoveFromAttachmentIndex(lock, handle, entry->ref(ATTACHMENT_METADATA));
723
724 if (save_to_journal) {
725 entries_to_journal->insert(entry);
726 } else {
727 delete entry;
728 }
729 }
730
731 bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
732 ModelTypeSet types_to_journal,
733 ModelTypeSet types_to_unapply) {
734 disabled_types.RemoveAll(ProxyTypes());
735
736 if (disabled_types.Empty())
737 return true;
738
739 {
740 WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);
741
742 EntryKernelSet entries_to_journal;
743 STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);
744
745 {
746 ScopedKernelLock lock(this);
747
748 bool found_progress = false;
749 for (ModelTypeSet::Iterator iter = disabled_types.First(); iter.Good();
750 iter.Inc()) {
751 if (!kernel_->persisted_info.HasEmptyDownloadProgress(iter.Get()))
752 found_progress = true;
753 }
754
755 // If none of the disabled types have progress markers, there's nothing to
756 // purge.
757 if (!found_progress)
758 return true;
759
760 // We iterate in two passes to avoid a bug in STLport (which is used in
761 // the Android build). There are some versions of that library where a
762 // hash_map's iterators can be invalidated when an item is erased from the
763 // hash_map.
764 // See http://sourceforge.net/p/stlport/bugs/239/.
765
766 std::set<EntryKernel*> to_purge;
767 for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
768 it != kernel_->metahandles_map.end(); ++it) {
769 const sync_pb::EntitySpecifics& local_specifics =
770 it->second->ref(SPECIFICS);
771 const sync_pb::EntitySpecifics& server_specifics =
772 it->second->ref(SERVER_SPECIFICS);
773 ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
774 ModelType server_type = GetModelTypeFromSpecifics(server_specifics);
775
776 if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
777 (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
778 to_purge.insert(it->second);
779 }
780 }
781
782 for (std::set<EntryKernel*>::iterator it = to_purge.begin();
783 it != to_purge.end(); ++it) {
784 EntryKernel* entry = *it;
785
786 const sync_pb::EntitySpecifics& local_specifics =
787 (*it)->ref(SPECIFICS);
788 const sync_pb::EntitySpecifics& server_specifics =
789 (*it)->ref(SERVER_SPECIFICS);
790 ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
791 ModelType server_type = GetModelTypeFromSpecifics(server_specifics);
792
793 if (types_to_unapply.Has(local_type) ||
794 types_to_unapply.Has(server_type)) {
795 UnapplyEntry(entry);
796 } else {
797 bool save_to_journal =
798 (types_to_journal.Has(local_type) ||
799 types_to_journal.Has(server_type)) &&
800 (delete_journal_->IsDeleteJournalEnabled(local_type) ||
801 delete_journal_->IsDeleteJournalEnabled(server_type));
802 DeleteEntry(lock, save_to_journal, entry, &entries_to_journal);
803 }
804 }
805
806 delete_journal_->AddJournalBatch(&trans, entries_to_journal);
807
808 // Ensure meta tracking for these data types reflects the purged state.
809 for (ModelTypeSet::Iterator it = disabled_types.First();
810 it.Good(); it.Inc()) {
811 kernel_->persisted_info.transaction_version[it.Get()] = 0;
812
813 // Don't discard progress markers or context for unapplied types.
814 if (!types_to_unapply.Has(it.Get())) {
815 kernel_->persisted_info.ResetDownloadProgress(it.Get());
816 kernel_->persisted_info.datatype_context[it.Get()].Clear();
817 }
818 }
819
820 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
821 }
822 }
823 return true;
824 }
825
826 bool Directory::ResetVersionsForType(BaseWriteTransaction* trans,
827 ModelType type) {
828 if (!ProtocolTypes().Has(type))
829 return false;
830 DCHECK_NE(type, BOOKMARKS) << "Only non-hierarchical types are supported";
831
832 EntryKernel* type_root = GetEntryByServerTag(ModelTypeToRootTag(type));
833 if (!type_root)
834 return false;
835
836 ScopedKernelLock lock(this);
837 const Id& type_root_id = type_root->ref(ID);
838 Directory::Metahandles children;
839 AppendChildHandles(lock, type_root_id, &children);
840
841 for (Metahandles::iterator it = children.begin(); it != children.end();
842 ++it) {
843 EntryKernel* entry = GetEntryByHandle(lock, *it);
844 if (!entry)
845 continue;
846 if (entry->ref(BASE_VERSION) > 1)
847 entry->put(BASE_VERSION, 1);
848 if (entry->ref(SERVER_VERSION) > 1)
849 entry->put(SERVER_VERSION, 1);
850
851 // Note that we do not unset IS_UNSYNCED or IS_UNAPPLIED_UPDATE in order
852 // to ensure no in-transit data is lost.
853
854 entry->mark_dirty(&kernel_->dirty_metahandles);
855 }
856
857 return true;
858 }
859
860 bool Directory::IsAttachmentLinked(
861 const sync_pb::AttachmentIdProto& attachment_id_proto) const {
862 ScopedKernelLock lock(this);
863 IndexByAttachmentId::const_iterator iter =
864 kernel_->index_by_attachment_id.find(attachment_id_proto.unique_id());
865 if (iter != kernel_->index_by_attachment_id.end() && !iter->second.empty()) {
866 return true;
867 }
868 return false;
869 }
870
871 void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
872 WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
873 ScopedKernelLock lock(this);
874 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
875
876 // Because we optimistically cleared the dirty bit on the real entries when
877 // taking the snapshot, we must restore it on failure. Not doing this could
878 // cause lost data, if no other changes are made to the in-memory entries
879 // that would cause the dirty bit to get set again. Setting the bit ensures
880 // that SaveChanges will at least try again later.
881 for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
882 i != snapshot.dirty_metas.end(); ++i) {
883 MetahandlesMap::iterator found =
884 kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
885 if (found != kernel_->metahandles_map.end()) {
886 found->second->mark_dirty(&kernel_->dirty_metahandles);
887 }
888 }
889
890 kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
891 snapshot.metahandles_to_purge.end());
892
893 // Restore delete journals.
894 delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
895 delete_journal_->PurgeDeleteJournals(&trans,
896 snapshot.delete_journals_to_purge);
897 }
898
899 void Directory::GetDownloadProgress(
900 ModelType model_type,
901 sync_pb::DataTypeProgressMarker* value_out) const {
902 ScopedKernelLock lock(this);
903 return value_out->CopyFrom(
904 kernel_->persisted_info.download_progress[model_type]);
905 }
906
907 void Directory::GetDownloadProgressAsString(
908 ModelType model_type,
909 std::string* value_out) const {
910 ScopedKernelLock lock(this);
911 kernel_->persisted_info.download_progress[model_type].SerializeToString(
912 value_out);
913 }
914
915 size_t Directory::GetEntriesCount() const {
916 ScopedKernelLock lock(this);
917 return kernel_->metahandles_map.size();
918 }
919
920 void Directory::SetDownloadProgress(
921 ModelType model_type,
922 const sync_pb::DataTypeProgressMarker& new_progress) {
923 ScopedKernelLock lock(this);
924 kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
925 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
926 }
927
928 int64 Directory::GetTransactionVersion(ModelType type) const {
929 kernel_->transaction_mutex.AssertAcquired();
930 return kernel_->persisted_info.transaction_version[type];
931 }
932
933 void Directory::IncrementTransactionVersion(ModelType type) {
934 kernel_->transaction_mutex.AssertAcquired();
935 kernel_->persisted_info.transaction_version[type]++;
936 }
937
938 void Directory::GetDataTypeContext(BaseTransaction* trans,
939 ModelType type,
940 sync_pb::DataTypeContext* context) const {
941 ScopedKernelLock lock(this);
942 context->CopyFrom(kernel_->persisted_info.datatype_context[type]);
943 }
944
945 void Directory::SetDataTypeContext(
946 BaseWriteTransaction* trans,
947 ModelType type,
948 const sync_pb::DataTypeContext& context) {
949 ScopedKernelLock lock(this);
950 kernel_->persisted_info.datatype_context[type].CopyFrom(context);
951 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
952 }
953
954 ModelTypeSet Directory::InitialSyncEndedTypes() {
955 syncable::ReadTransaction trans(FROM_HERE, this);
956 ModelTypeSet protocol_types = ProtocolTypes();
957 ModelTypeSet initial_sync_ended_types;
958 for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
959 if (InitialSyncEndedForType(&trans, i.Get())) {
960 initial_sync_ended_types.Put(i.Get());
961 }
962 }
963 return initial_sync_ended_types;
964 }
965
966 bool Directory::InitialSyncEndedForType(ModelType type) {
967 syncable::ReadTransaction trans(FROM_HERE, this);
968 return InitialSyncEndedForType(&trans, type);
969 }
970
971 bool Directory::InitialSyncEndedForType(
972 BaseTransaction* trans, ModelType type) {
973 // True iff the type's root node has been received and applied.
974 syncable::Entry entry(trans, syncable::GET_TYPE_ROOT, type);
975 return entry.good() && entry.GetBaseVersion() != CHANGES_VERSION;
976 }
977
978 string Directory::store_birthday() const {
979 ScopedKernelLock lock(this);
980 return kernel_->persisted_info.store_birthday;
981 }
982
983 void Directory::set_store_birthday(const string& store_birthday) {
984 ScopedKernelLock lock(this);
985 if (kernel_->persisted_info.store_birthday == store_birthday)
986 return;
987 kernel_->persisted_info.store_birthday = store_birthday;
988 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
989 }
990
991 string Directory::bag_of_chips() const {
992 ScopedKernelLock lock(this);
993 return kernel_->persisted_info.bag_of_chips;
994 }
995
996 void Directory::set_bag_of_chips(const string& bag_of_chips) {
997 ScopedKernelLock lock(this);
998 if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
999 return;
1000 kernel_->persisted_info.bag_of_chips = bag_of_chips;
1001 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
1002 }
1003
1004
1005 string Directory::cache_guid() const {
1006 // No need to lock since nothing ever writes to it after load.
1007 return kernel_->cache_guid;
1008 }
1009
1010 NigoriHandler* Directory::GetNigoriHandler() {
1011 return nigori_handler_;
1012 }
1013
1014 Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
1015 DCHECK_EQ(this, trans->directory());
1016 return cryptographer_;
1017 }
1018
1019 void Directory::GetAllMetaHandles(BaseTransaction* trans,
1020 MetahandleSet* result) {
1021 result->clear();
1022 ScopedKernelLock lock(this);
1023 for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
1024 i != kernel_->metahandles_map.end(); ++i) {
1025 result->insert(i->first);
1026 }
1027 }
1028
1029 void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
1030 Metahandles* result) {
1031 result->clear();
1032 ScopedKernelLock lock(this);
1033 copy(kernel_->unsynced_metahandles.begin(),
1034 kernel_->unsynced_metahandles.end(), back_inserter(*result));
1035 }
1036
1037 int64 Directory::unsynced_entity_count() const {
1038 ScopedKernelLock lock(this);
1039 return kernel_->unsynced_metahandles.size();
1040 }
1041
1042 bool Directory::TypeHasUnappliedUpdates(ModelType type) {
1043 ScopedKernelLock lock(this);
1044 return !kernel_->unapplied_update_metahandles[type].empty();
1045 }
1046
1047 void Directory::GetUnappliedUpdateMetaHandles(
1048 BaseTransaction* trans,
1049 FullModelTypeSet server_types,
1050 std::vector<int64>* result) {
1051 result->clear();
1052 ScopedKernelLock lock(this);
1053 for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
1054 const ModelType type = ModelTypeFromInt(i);
1055 if (server_types.Has(type)) {
1056 std::copy(kernel_->unapplied_update_metahandles[type].begin(),
1057 kernel_->unapplied_update_metahandles[type].end(),
1058 back_inserter(*result));
1059 }
1060 }
1061 }
1062
1063 void Directory::GetMetaHandlesOfType(BaseTransaction* trans,
1064 ModelType type,
1065 std::vector<int64>* result) {
1066 ScopedKernelLock lock(this);
1067 GetMetaHandlesOfType(lock, trans, type, result);
1068 }
1069
1070 void Directory::GetMetaHandlesOfType(const ScopedKernelLock& lock,
1071 BaseTransaction* trans,
1072 ModelType type,
1073 std::vector<int64>* result) {
1074 result->clear();
1075 for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
1076 it != kernel_->metahandles_map.end(); ++it) {
1077 EntryKernel* entry = it->second;
1078 const ModelType entry_type =
1079 GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
1080 if (entry_type == type)
1081 result->push_back(it->first);
1082 }
1083 }
1084
1085 void Directory::CollectMetaHandleCounts(
1086 std::vector<int>* num_entries_by_type,
1087 std::vector<int>* num_to_delete_entries_by_type) {
1088 syncable::ReadTransaction trans(FROM_HERE, this);
1089 ScopedKernelLock lock(this);
1090
1091 for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
1092 it != kernel_->metahandles_map.end(); ++it) {
1093 EntryKernel* entry = it->second;
1094 const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
1095 (*num_entries_by_type)[type]++;
1096 if (entry->ref(IS_DEL))
1097 (*num_to_delete_entries_by_type)[type]++;
1098 }
1099 }
1100
1101 scoped_ptr<base::ListValue> Directory::GetNodeDetailsForType(
1102 BaseTransaction* trans,
1103 ModelType type) {
1104 scoped_ptr<base::ListValue> nodes(new base::ListValue());
1105
1106 ScopedKernelLock lock(this);
1107 for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
1108 it != kernel_->metahandles_map.end(); ++it) {
1109 if (GetModelTypeFromSpecifics(it->second->ref(SPECIFICS)) != type) {
1110 continue;
1111 }
1112
1113 EntryKernel* kernel = it->second;
1114 scoped_ptr<base::DictionaryValue> node(
1115 kernel->ToValue(GetCryptographer(trans)));
1116
1117 // Add the position index if appropriate. This must be done here (and not
1118 // in EntryKernel) because the EntryKernel does not have access to its
1119 // siblings.
1120 if (kernel->ShouldMaintainPosition() && !kernel->ref(IS_DEL)) {
1121 node->SetInteger("positionIndex", GetPositionIndex(trans, kernel));
1122 }
1123
1124 nodes->Append(node.release());
1125 }
1126
1127 return nodes.Pass();
1128 }
1129
1130 bool Directory::CheckInvariantsOnTransactionClose(
1131 syncable::BaseTransaction* trans,
1132 const MetahandleSet& modified_handles) {
1133 // NOTE: The trans may be in the process of being destructed. Be careful if
1134 // you wish to call any of its virtual methods.
1135 switch (invariant_check_level_) {
1136 case FULL_DB_VERIFICATION: {
1137 MetahandleSet all_handles;
1138 GetAllMetaHandles(trans, &all_handles);
1139 return CheckTreeInvariants(trans, all_handles);
1140 }
1141 case VERIFY_CHANGES: {
1142 return CheckTreeInvariants(trans, modified_handles);
1143 }
1144 case OFF: {
1145 return true;
1146 }
1147 }
1148 NOTREACHED();
1149 return false;
1150 }
1151
1152 bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
1153 MetahandleSet handles;
1154 GetAllMetaHandles(trans, &handles);
1155 return CheckTreeInvariants(trans, handles);
1156 }
1157
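// Verifies structural invariants for every entry in |handles|: parent links
// and names for undeleted items, plus the expected relationship between
// base/server versions, server-known IDs, and the IS_UNSYNCED /
// IS_UNAPPLIED_UPDATE bits.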
1158 bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
1159 const MetahandleSet& handles) {
1160 MetahandleSet::const_iterator i;
1161 for (i = handles.begin() ; i != handles.end() ; ++i) {
1162 int64 metahandle = *i;
1163 Entry e(trans, GET_BY_HANDLE, metahandle);
1164 if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
1165 return false;
1166 syncable::Id id = e.GetId();
1167 syncable::Id parentid = e.GetParentId();
1168
1169 if (id.IsRoot()) {
1170 if (!SyncAssert(e.GetIsDir(), FROM_HERE,
1171 "Entry should be a directory",
1172 trans))
1173 return false;
1174 if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
1175 "Entry should be root",
1176 trans))
1177 return false;
1178 if (!SyncAssert(!e.GetIsUnsynced(), FROM_HERE,
1179 "Entry should be synced",
1180 trans))
1181 return false;
1182 continue;
1183 }
1184
1185 if (!e.GetIsDel()) {
1186 if (!SyncAssert(id != parentid, FROM_HERE,
1187 "Id should be different from parent id.",
1188 trans))
1189 return false;
1190 if (!SyncAssert(!e.GetNonUniqueName().empty(), FROM_HERE,
1191 "Non unique name should not be empty.",
1192 trans))
1193 return false;
1194 int safety_count = handles.size() + 1;
1195 while (!parentid.IsRoot()) {
1196 Entry parent(trans, GET_BY_ID, parentid);
1197 if (!SyncAssert(parent.good(), FROM_HERE,
1198 "Parent entry is not valid.",
1199 trans))
1200 return false;
1201 if (handles.end() == handles.find(parent.GetMetahandle()))
1202 break; // Skip further checking if parent was unmodified.
1203 if (!SyncAssert(parent.GetIsDir(), FROM_HERE,
1204 "Parent should be a directory",
1205 trans))
1206 return false;
1207 if (!SyncAssert(!parent.GetIsDel(), FROM_HERE,
1208 "Parent should not have been marked for deletion.",
1209 trans))
1210 return false;
1211 if (!SyncAssert(handles.end() != handles.find(parent.GetMetahandle()),
1212 FROM_HERE,
1213 "Parent should be in the index.",
1214 trans))
1215 return false;
1216 parentid = parent.GetParentId();
1217 if (!SyncAssert(--safety_count > 0, FROM_HERE,
1218 "Count should be greater than zero.",
1219 trans))
1220 return false;
1221 }
1222 }
1223 int64 base_version = e.GetBaseVersion();
1224 int64 server_version = e.GetServerVersion();
1225 bool using_unique_client_tag = !e.GetUniqueClientTag().empty();
1226 if (CHANGES_VERSION == base_version || 0 == base_version) {
1227 if (e.GetIsUnappliedUpdate()) {
1228 // Must be a new item, or a de-duplicated unique client tag
1229 // that was created both locally and remotely.
1230 if (!using_unique_client_tag) {
1231 if (!SyncAssert(e.GetIsDel(), FROM_HERE,
1232 "The entry should not have been deleted.",
1233 trans))
1234 return false;
1235 }
1236 // It came from the server, so it must have a server ID.
1237 if (!SyncAssert(id.ServerKnows(), FROM_HERE,
1238 "The id should be from a server.",
1239 trans))
1240 return false;
1241 } else {
1242 if (e.GetIsDir()) {
1243 // TODO(chron): Implement this mode if clients ever need it.
1244 // For now, you can't combine a client tag and a directory.
1245 if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
1246 "Directory cannot have a client tag.",
1247 trans))
1248 return false;
1249 }
1250 // Should be an uncommitted item, or a successfully deleted one.
1251 if (!e.GetIsDel()) {
1252 if (!SyncAssert(e.GetIsUnsynced(), FROM_HERE,
1253 "The item should be unsynced.",
1254 trans))
1255 return false;
1256 }
1257 // If the next check failed, it would imply that an item exists
1258 // on the server, isn't waiting for application locally, but either
1259 // is an unsynced create or a successful delete in the local copy.
1260 // Either way, that's a mismatch.
1261 if (!SyncAssert(0 == server_version, FROM_HERE,
1262 "Server version should be zero.",
1263 trans))
1264 return false;
1265 // Items that aren't using the unique client tag should have a zero
1266 // base version only if they have a local ID. Items with unique client
1267 // tags are allowed to use the zero base version for undeletion and
1268 // de-duplication; the unique client tag trumps the server ID.
1269 if (!using_unique_client_tag) {
1270 if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
1271 "Should be a client only id.",
1272 trans))
1273 return false;
1274 }
1275 }
1276 } else {
1277 if (!SyncAssert(id.ServerKnows(),
1278 FROM_HERE,
1279 "Should be a server id.",
1280 trans))
1281 return false;
1282 }
1283 // Server-unknown items that are locally deleted should not be sent up to
1284 // the server. They must be !IS_UNSYNCED.
1285 if (!SyncAssert(!(!id.ServerKnows() && e.GetIsDel() && e.GetIsUnsynced()),
1286 FROM_HERE,
1287 "Locally deleted item must not be unsynced.",
1288 trans)) {
1289 return false;
1290 }
1291 }
1292 return true;
1293 }
1294
1295 void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
1296 invariant_check_level_ = check_level;
1297 }
1298
1299 int64 Directory::NextMetahandle() {
1300 ScopedKernelLock lock(this);
1301 int64 metahandle = (kernel_->next_metahandle)++;
1302 return metahandle;
1303 }
1304
1305 // Always returns a client ID that is the string representation of a negative
1306 // number.
1307 Id Directory::NextId() {
1308 int64 result;
1309 {
1310 ScopedKernelLock lock(this);
1311 result = (kernel_->persisted_info.next_id)--;
1312 kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
1313 }
1314 DCHECK_LT(result, 0);
1315 return Id::CreateFromClientString(base::Int64ToString(result));
1316 }
1317
1318 bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
1319 ScopedKernelLock lock(this);
1320 return kernel_->parent_child_index.GetChildren(id) != NULL;
1321 }
1322
1323 Id Directory::GetFirstChildId(BaseTransaction* trans,
1324 const EntryKernel* parent) {
1325 DCHECK(parent);
1326 DCHECK(parent->ref(IS_DIR));
1327
1328 ScopedKernelLock lock(this);
1329 const OrderedChildSet* children =
1330 kernel_->parent_child_index.GetChildren(parent->ref(ID));
1331
1332 // We're expected to return root if there are no children.
1333 if (!children)
1334 return Id();
1335
1336 return (*children->begin())->ref(ID);
1337 }
1338
1339 syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
1340 ScopedKernelLock lock(this);
1341
1342 DCHECK(ParentChildIndex::ShouldInclude(e));
1343 const OrderedChildSet* children =
1344 kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
1345 DCHECK(children && !children->empty());
1346 OrderedChildSet::const_iterator i = children->find(e);
1347 DCHECK(i != children->end());
1348
1349 if (i == children->begin()) {
1350 return Id();
1351 } else {
1352 i--;
1353 return (*i)->ref(ID);
1354 }
1355 }
1356
1357 syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
1358 ScopedKernelLock lock(this);
1359
1360 DCHECK(ParentChildIndex::ShouldInclude(e));
1361 const OrderedChildSet* children =
1362 kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
1363 DCHECK(children && !children->empty());
1364 OrderedChildSet::const_iterator i = children->find(e);
1365 DCHECK(i != children->end());
1366
1367 i++;
1368 if (i == children->end()) {
1369 return Id();
1370 } else {
1371 return (*i)->ref(ID);
1372 }
1373 }
1374
1375 // TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
1376 // items as siblings of items that do not maintain positions. It is required
1377 // only for tests. See crbug.com/178282.
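// Assigns e's UNIQUE_POSITION so that it sorts immediately after
// |predecessor| among its siblings; a predecessor with the root ID means
// "insert as the first child".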
1378 void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
1379 DCHECK(!e->ref(IS_DEL));
1380 if (!e->ShouldMaintainPosition()) {
1381 DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
1382 return;
1383 }
1384 std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
1385 DCHECK(!suffix.empty());
1386
1387 // Remove our item from the ParentChildIndex and remember to re-add it later.
1388 ScopedKernelLock lock(this);
1389 ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);
1390
1391 // Note: The ScopedParentChildIndexUpdater will update this set for us as we
1392 // leave this function.
1393 const OrderedChildSet* siblings =
1394 kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
1395
1396 if (!siblings) {
1397 // This parent currently has no other children.
1398 DCHECK(predecessor->ref(ID).IsRoot());
1399 UniquePosition pos = UniquePosition::InitialPosition(suffix);
1400 e->put(UNIQUE_POSITION, pos);
1401 return;
1402 }
1403
1404 if (predecessor->ref(ID).IsRoot()) {
1405 // We have at least one sibling, and we're inserting to the left of them.
1406 UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);
1407
1408 UniquePosition pos;
1409 if (!successor_pos.IsValid()) {
1410 // If all our successors are of non-positionable types, just create an
1411 // initial position. We arbitrarily choose to sort invalid positions to
1412 // the right of the valid positions.
1413 //
1414 // We really shouldn't need to support this. See TODO above.
1415 pos = UniquePosition::InitialPosition(suffix);
1416 } else {
1417 DCHECK(!siblings->empty());
1418 pos = UniquePosition::Before(successor_pos, suffix);
1419 }
1420
1421 e->put(UNIQUE_POSITION, pos);
1422 return;
1423 }
1424
1425 // We can't support placing an item after an invalid position. Fortunately,
1426 // the tests don't exercise this particular case. We should not support
1427 // siblings with invalid positions at all. See TODO above.
1428 DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());
1429
1430 OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
1431 DCHECK(neighbour != siblings->end());
1432
1433 ++neighbour;
1434 if (neighbour == siblings->end()) {
1435 // Inserting at the end of the list.
1436 UniquePosition pos = UniquePosition::After(
1437 predecessor->ref(UNIQUE_POSITION),
1438 suffix);
1439 e->put(UNIQUE_POSITION, pos);
1440 return;
1441 }
1442
1443 EntryKernel* successor = *neighbour;
1444
1445 // Another mixed valid and invalid position case. This one could be supported
1446 // in theory, but we're trying to deprecate support for siblings with and
1447 // without valid positions. See TODO above.
1448 DCHECK(successor->ref(UNIQUE_POSITION).IsValid());
1449
1450 // Finally, the normal case: inserting between two elements.
1451 UniquePosition pos = UniquePosition::Between(
1452 predecessor->ref(UNIQUE_POSITION),
1453 successor->ref(UNIQUE_POSITION),
1454 suffix);
1455 e->put(UNIQUE_POSITION, pos);
1456 return;
1457 }
1458
1459 // TODO(rlarocque): Avoid this indirection. Just return the set.
1460 void Directory::AppendChildHandles(const ScopedKernelLock& lock,
1461 const Id& parent_id,
1462 Directory::Metahandles* result) {
1463 const OrderedChildSet* children =
1464 kernel_->parent_child_index.GetChildren(parent_id);
1465 if (!children)
1466 return;
1467
1468 for (OrderedChildSet::const_iterator i = children->begin();
1469 i != children->end(); ++i) {
1470 DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
1471 result->push_back((*i)->ref(META_HANDLE));
1472 }
1473 }
1474
1475 void Directory::UnmarkDirtyEntry(WriteTransaction* trans, Entry* entry) {
1476 CHECK(trans);
1477 entry->kernel_->clear_dirty(&kernel_->dirty_metahandles);
1478 }
1479
1480 void Directory::GetAttachmentIdsToUpload(BaseTransaction* trans,
1481 ModelType type,
1482 AttachmentIdSet* id_set) {
1483 // TODO(maniscalco): Maintain an index by ModelType and rewrite this method to
1484 // use it. The approach below is likely very expensive because it iterates
1485 // all entries (bug 415199).
1486 DCHECK(trans);
1487 DCHECK(id_set);
1488 id_set->clear();
1489 AttachmentIdSet on_server_id_set;
1490 AttachmentIdSet not_on_server_id_set;
1491 std::vector<int64> metahandles;
1492 {
1493 ScopedKernelLock lock(this);
1494 GetMetaHandlesOfType(lock, trans, type, &metahandles);
1495 std::vector<int64>::const_iterator iter = metahandles.begin();
1496 const std::vector<int64>::const_iterator end = metahandles.end();
1497 // For all of this type's entries...
1498 for (; iter != end; ++iter) {
1499 EntryKernel* entry = GetEntryByHandle(lock, *iter);
1500 DCHECK(entry);
1501 const sync_pb::AttachmentMetadata metadata =
1502 entry->ref(ATTACHMENT_METADATA);
1503 // for each of this entry's attachments...
1504 for (int i = 0; i < metadata.record_size(); ++i) {
1505 AttachmentId id =
1506 AttachmentId::CreateFromProto(metadata.record(i).id());
1507 // if this attachment is known to be on the server, remember it for
1508 // later,
1509 if (metadata.record(i).is_on_server()) {
1510 on_server_id_set.insert(id);
1511 } else {
1512 // otherwise, add it to id_set.
1513 not_on_server_id_set.insert(id);
1514 }
1515 }
1516 }
1517 }
1518 // Why did we bother keeping a set of ids known to be on the server? The
1519 // is_on_server flag is stored denormalized so we can end up with two entries
1520 // with the same attachment id where one says it's on the server and the other
1521 // says it's not. When this happens, we trust the one that says it's on the
1522 // server. To avoid re-uploading the same attachment multiple times, we
1523 // remove any ids known to be on the server from the id_set we are about to
1524 // return.
1525 //
1526 // TODO(maniscalco): Eliminate redundant metadata storage (bug 415203).
1527 std::set_difference(not_on_server_id_set.begin(),
1528 not_on_server_id_set.end(),
1529 on_server_id_set.begin(),
1530 on_server_id_set.end(),
1531 std::inserter(*id_set, id_set->end()));
1532 }
1533
1534 } // namespace syncable
1535 } // namespace syncer
1536