// Copyright 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/engine/syncer_util.h"

#include <algorithm>
#include <set>
#include <string>
#include <vector>

#include "base/base64.h"
#include "base/location.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
#include "sync/engine/conflict_resolver.h"
#include "sync/engine/syncer_proto_util.h"
#include "sync/engine/syncer_types.h"
#include "sync/internal_api/public/base/model_type.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/protocol/bookmark_specifics.pb.h"
#include "sync/protocol/password_specifics.pb.h"
#include "sync/protocol/sync.pb.h"
#include "sync/syncable/directory.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/model_neutral_mutable_entry.h"
#include "sync/syncable/mutable_entry.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_model_neutral_write_transaction.h"
#include "sync/syncable/syncable_proto_util.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"
#include "sync/util/cryptographer.h"
#include "sync/util/time.h"

namespace syncer {

using syncable::BASE_SERVER_SPECIFICS;
using syncable::BASE_VERSION;
using syncable::CHANGES_VERSION;
using syncable::CREATE_NEW_UPDATE_ITEM;
using syncable::CTIME;
using syncable::Directory;
using syncable::Entry;
using syncable::GET_BY_HANDLE;
using syncable::GET_BY_ID;
using syncable::ID;
using syncable::IS_DEL;
using syncable::IS_DIR;
using syncable::IS_UNAPPLIED_UPDATE;
using syncable::IS_UNSYNCED;
using syncable::Id;
using syncable::META_HANDLE;
using syncable::MTIME;
using syncable::MutableEntry;
using syncable::NON_UNIQUE_NAME;
using syncable::PARENT_ID;
using syncable::SERVER_CTIME;
using syncable::SERVER_IS_DEL;
using syncable::SERVER_IS_DIR;
using syncable::SERVER_MTIME;
using syncable::SERVER_NON_UNIQUE_NAME;
using syncable::SERVER_PARENT_ID;
using syncable::SERVER_SPECIFICS;
using syncable::SERVER_UNIQUE_POSITION;
using syncable::SERVER_VERSION;
using syncable::SPECIFICS;
using syncable::SYNCER;
using syncable::UNIQUE_BOOKMARK_TAG;
using syncable::UNIQUE_CLIENT_TAG;
using syncable::UNIQUE_POSITION;
using syncable::UNIQUE_SERVER_TAG;
using syncable::WriteTransaction;

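// Returns the local ID an incoming update should be applied to.  Matching is
// attempted first by client-defined unique tag, then by originator cache GUID
// plus originator client item ID (to recover when a commit response was
// lost); otherwise the update's own server ID is returned so a new entry can
// be created for it.  A null ID signals that the update should be dropped.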
syncable::Id FindLocalIdToUpdate(
    syncable::BaseTransaction* trans,
    const sync_pb::SyncEntity& update) {
  // Expected entry points of this function:
  // SyncEntity has NOT been applied to SERVER fields.
  // SyncEntity has NOT been applied to LOCAL fields.
  // DB has not yet been modified, no entries created for this update.

  const std::string& client_id = trans->directory()->cache_guid();
  const syncable::Id& update_id = SyncableIdFromProto(update.id_string());

  if (update.has_client_defined_unique_tag() &&
      !update.client_defined_unique_tag().empty()) {
    // When a server sends down a client tag, the following cases can occur:
    // 1) Client has entry for tag already, ID is server style, matches
    // 2) Client has entry for tag already, ID is server, doesn't match.
    // 3) Client has entry for tag already, ID is local, (never matches)
    // 4) Client has no entry for tag

    // Case 1, we don't have to do anything since the update will
    // work just fine. Update will end up in the proper entry, via ID lookup.
    // Case 2 - Happens very rarely due to lax enforcement of client tags
    // on the server, if two clients commit the same tag at the same time.
    // When this happens, we pick the lexically-least ID and ignore all other
    // items.
    // Case 3 - We need to replace the local ID with the server ID so that
    // this update gets targeted at the correct local entry; we expect conflict
    // resolution to occur.
    // Case 4 - Perfect. Same as case 1.

    syncable::Entry local_entry(trans, syncable::GET_BY_CLIENT_TAG,
                                update.client_defined_unique_tag());

    // The SyncAPI equivalent of this function will return !good if IS_DEL.
    // The syncable version will return good even if IS_DEL.
    // TODO(chron): Unit test the case with IS_DEL and make sure.
    if (local_entry.good()) {
      if (local_entry.GetId().ServerKnows()) {
        if (local_entry.GetId() != update_id) {
          // Case 2.
          LOG(WARNING) << "Duplicated client tag.";
          if (local_entry.GetId() < update_id) {
            // Signal an error; drop this update on the floor.  Note that
            // we don't server delete the item, because we don't allow it to
            // exist locally at all.  So the item will remain orphaned on
            // the server, and we won't pay attention to it.
            return syncable::GetNullId();
          }
        }
        // Target this change to the existing local entry; later,
        // we'll change the ID of the local entry to update_id
        // if needed.
        return local_entry.GetId();
      } else {
        // Case 3: We have a local entry with the same client tag.
        // We should change the ID of the local entry to the server entry.
        // This will result in a server ID with base version == 0, but that's
        // a legal state for an item with a client tag.  By changing the ID,
        // update will now be applied to local_entry.
        DCHECK(0 == local_entry.GetBaseVersion() ||
               CHANGES_VERSION == local_entry.GetBaseVersion());
        return local_entry.GetId();
      }
    }
  } else if (update.has_originator_cache_guid() &&
      update.originator_cache_guid() == client_id) {
    // If a commit succeeds, but the response does not come back fast enough
    // then the syncer might assume that it was never committed.
    // The server will track the client that sent up the original commit and
    // return this in a get updates response. When this matches a local
    // uncommitted item, we must mutate our local item and version to pick up
    // the committed version of the same item whose commit response was lost.
    // There is however still a race condition if the server has not
    // completed the commit by the time the syncer tries to get updates
    // again. To mitigate this, we need to have the server time out in
    // a reasonable span, and our commit batches have to be small enough
    // to process within our HTTP response "assumed alive" time.

    // We need to check if we have an entry that didn't get its server
    // ID updated correctly. The server sends down a client ID
    // and a local (negative) ID. If we have an entry by that
    // description, we should update the ID and version to the
    // server side ones to avoid multiple copies of the same thing.

    syncable::Id client_item_id = syncable::Id::CreateFromClientString(
        update.originator_client_item_id());
    DCHECK(!client_item_id.ServerKnows());
    syncable::Entry local_entry(trans, GET_BY_ID, client_item_id);

    // If it exists, then our local client lost a commit response.  Use
    // the local entry.
    if (local_entry.good() && !local_entry.GetIsDel()) {
      int64 old_version = local_entry.GetBaseVersion();
      int64 new_version = update.version();
      DCHECK_LE(old_version, 0);
      DCHECK_GT(new_version, 0);
      // Otherwise setting the base version could cause a consistency failure.
      // An entry should never be version 0 and SYNCED.
      DCHECK(local_entry.GetIsUnsynced());

      // Just a quick sanity check.
      DCHECK(!local_entry.GetId().ServerKnows());

      DVLOG(1) << "Reuniting lost commit response IDs. server id: "
               << update_id << " local id: " << local_entry.GetId()
               << " new version: " << new_version;

      return local_entry.GetId();
    }
  }
  // Fallback: target an entry having the server ID, creating one if needed.
  return update_id;
}

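// Attempts to apply an unapplied update to |entry|.  Returns SUCCESS once the
// server fields have been copied into the local fields, or one of the
// CONFLICT_* responses when the update cannot be applied yet: undecryptable
// data, a missing or illegal parent, a non-empty directory being deleted, or
// a local unsynced change to the same item.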
UpdateAttemptResponse AttemptToUpdateEntry(
    syncable::WriteTransaction* const trans,
    syncable::MutableEntry* const entry,
    Cryptographer* cryptographer) {
  CHECK(entry->good());
  if (!entry->GetIsUnappliedUpdate())
    return SUCCESS;  // No work to do.
  syncable::Id id = entry->GetId();
  const sync_pb::EntitySpecifics& specifics = entry->GetServerSpecifics();

  // Only apply updates that we can decrypt. If we can't decrypt the update, it
  // is likely because the passphrase has not arrived yet. Because the
  // passphrase may not arrive within this GetUpdates, we can't just return
  // conflict, else we try to perform normal conflict resolution prematurely or
  // the syncer may get stuck. As such, we return CONFLICT_ENCRYPTION, which is
  // treated as an unresolvable conflict. See the description in syncer_types.h.
  // This prevents any unsynced changes from committing and postpones conflict
  // resolution until all data can be decrypted.
  if (specifics.has_encrypted() &&
      !cryptographer->CanDecrypt(specifics.encrypted())) {
    // We can't decrypt this node yet.
    DVLOG(1) << "Received an undecryptable "
             << ModelTypeToString(entry->GetServerModelType())
             << " update, returning conflict_encryption.";
    return CONFLICT_ENCRYPTION;
  } else if (specifics.has_password() &&
             entry->GetUniqueServerTag().empty()) {
    // Passwords use their own legacy encryption scheme.
    const sync_pb::PasswordSpecifics& password = specifics.password();
    if (!cryptographer->CanDecrypt(password.encrypted())) {
      DVLOG(1) << "Received an undecryptable password update, returning "
               << "conflict_encryption.";
      return CONFLICT_ENCRYPTION;
    }
  }

  if (!entry->GetServerIsDel()) {
    syncable::Id new_parent = entry->GetServerParentId();
    Entry parent(trans, GET_BY_ID, new_parent);
    // A note on non-directory parents:
    // We catch most unfixable tree invariant errors at update receipt time,
    // however we deal with this case here because we may receive the child
    // first then the illegal parent. Instead of dealing with it twice in
    // different ways we deal with it once here to reduce the amount of code and
    // potential errors.
    if (!parent.good() || parent.GetIsDel() || !parent.GetIsDir()) {
      DVLOG(1) << "Entry has bad parent, returning conflict_hierarchy.";
      return CONFLICT_HIERARCHY;
    }
    if (entry->GetParentId() != new_parent) {
      if (!entry->GetIsDel() && !IsLegalNewParent(trans, id, new_parent)) {
        DVLOG(1) << "Not updating item " << id
                 << ", illegal new parent (would cause loop).";
        return CONFLICT_HIERARCHY;
      }
    }
  } else if (entry->GetIsDir()) {
    Directory::Metahandles handles;
    trans->directory()->GetChildHandlesById(trans, id, &handles);
    if (!handles.empty()) {
      // If we have still-existing children, then we need to deal with
      // them before we can process this change.
      DVLOG(1) << "Not deleting directory; it's not empty " << *entry;
      return CONFLICT_HIERARCHY;
    }
  }

  if (entry->GetIsUnsynced()) {
    DVLOG(1) << "Skipping update, returning conflict for: " << id
             << " ; it's unsynced.";
    return CONFLICT_SIMPLE;
  }

  if (specifics.has_encrypted()) {
    DVLOG(2) << "Received a decryptable "
             << ModelTypeToString(entry->GetServerModelType())
             << " update, applying normally.";
  } else {
    DVLOG(2) << "Received an unencrypted "
             << ModelTypeToString(entry->GetServerModelType())
             << " update, applying normally.";
  }

  UpdateLocalDataFromServerData(trans, entry);

  return SUCCESS;
}

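// Derives the unique bookmark tag for an update from its originator cache
// GUID and originator client item ID, falling back to a random suffix (and
// logging an error) when the server omits those fields.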
std::string GetUniqueBookmarkTagFromUpdate(const sync_pb::SyncEntity& update) {
  if (!update.has_originator_cache_guid() ||
      !update.has_originator_client_item_id()) {
    LOG(ERROR) << "Update is missing requirements for bookmark position."
               << " This is a server bug.";
    return UniquePosition::RandomSuffix();
  }

  return syncable::GenerateSyncableBookmarkHash(
      update.originator_cache_guid(), update.originator_client_item_id());
}

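// Computes the server position for an incoming update.  Types that don't
// maintain positions get an invalid position; otherwise the new-style
// unique_position proto wins, then the legacy int64 position_in_parent
// (converted using |suffix|), then a last-resort position of 0.
//
// Illustrative (hypothetical) call site, mirroring UpdateBookmarkPositioning()
// below:
//   UniquePosition pos =
//       GetUpdatePosition(update, entry->GetUniqueBookmarkTag());
//   if (pos.IsValid())
//     entry->PutServerUniquePosition(pos);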
UniquePosition GetUpdatePosition(const sync_pb::SyncEntity& update,
                                 const std::string& suffix) {
  DCHECK(UniquePosition::IsValidSuffix(suffix));
  if (!(SyncerProtoUtil::ShouldMaintainPosition(update))) {
    return UniquePosition::CreateInvalid();
  } else if (update.has_unique_position()) {
    return UniquePosition::FromProto(update.unique_position());
  } else if (update.has_position_in_parent()) {
    return UniquePosition::FromInt64(update.position_in_parent(), suffix);
  } else {
    LOG(ERROR) << "No position information in update. This is a server bug.";
    return UniquePosition::FromInt64(0, suffix);
  }
}

namespace {

// Helper to synthesize a new-style sync_pb::EntitySpecifics for use locally,
// when the server speaks only the old sync_pb::SyncEntity_BookmarkData-based
// protocol.
void UpdateBookmarkSpecifics(const std::string& singleton_tag,
                             const std::string& url,
                             const std::string& favicon_bytes,
                             syncable::ModelNeutralMutableEntry* local_entry) {
  // In the new-style protocol, the server no longer sends bookmark info for
  // the "google_chrome" folder.  Mimic that here.
  if (singleton_tag == "google_chrome")
    return;
  sync_pb::EntitySpecifics pb;
  sync_pb::BookmarkSpecifics* bookmark = pb.mutable_bookmark();
  if (!url.empty())
    bookmark->set_url(url);
  if (!favicon_bytes.empty())
    bookmark->set_favicon(favicon_bytes);
  local_entry->PutServerSpecifics(pb);
}

void UpdateBookmarkPositioning(
    const sync_pb::SyncEntity& update,
    syncable::ModelNeutralMutableEntry* local_entry) {
  // Update our unique bookmark tag.  In many cases this will be identical to
  // the tag we already have.  However, clients that have recently upgraded to
  // versions that support unique positions will have incorrect tags.  See the
  // v86 migration logic in directory_backing_store.cc for more information.
  //
  // Both the old and new values are unique to this element.  Applying this
  // update will not risk the creation of conflicting unique tags.
  std::string bookmark_tag = GetUniqueBookmarkTagFromUpdate(update);
  if (UniquePosition::IsValidSuffix(bookmark_tag)) {
    local_entry->PutUniqueBookmarkTag(bookmark_tag);
  }

  // Update our position.
  UniquePosition update_pos =
      GetUpdatePosition(update, local_entry->GetUniqueBookmarkTag());
  if (update_pos.IsValid()) {
    local_entry->PutServerUniquePosition(update_pos);
  }
}

}  // namespace

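// Copies an incoming update into |target|'s SERVER_* fields and marks the
// entry as an unapplied update when the update's version is newer than the
// local base version.  Deletions take a lightweight path that only flips the
// server-is-deleted bit and adjusts the server version.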
void UpdateServerFieldsFromUpdate(
    syncable::ModelNeutralMutableEntry* target,
    const sync_pb::SyncEntity& update,
    const std::string& name) {
  if (update.deleted()) {
    if (target->GetServerIsDel()) {
      // If we already think the item is server-deleted, we're done.
      // Skipping these cases prevents our committed deletions from coming
      // back and overriding subsequent undeletions.  For non-deleted items,
      // the version number check has a similar effect.
      return;
    }
    // The server returns very lightweight replies for deletions, so we don't
    // clobber a bunch of fields on delete.
    target->PutServerIsDel(true);
    if (!target->GetUniqueClientTag().empty()) {
      // Items identified by the client unique tag are undeletable; when
      // they're deleted, they go back to version 0.
      target->PutServerVersion(0);
    } else {
      // Otherwise, fake a server version by bumping the local number.
      target->PutServerVersion(
          std::max(target->GetServerVersion(), target->GetBaseVersion()) + 1);
    }
    target->PutIsUnappliedUpdate(true);
    return;
  }

  DCHECK_EQ(target->GetId(), SyncableIdFromProto(update.id_string()))
      << "ID Changing not supported here";
  target->PutServerParentId(SyncableIdFromProto(update.parent_id_string()));
  target->PutServerNonUniqueName(name);
  target->PutServerVersion(update.version());
  target->PutServerCtime(ProtoTimeToTime(update.ctime()));
  target->PutServerMtime(ProtoTimeToTime(update.mtime()));
  target->PutServerIsDir(IsFolder(update));
  if (update.has_server_defined_unique_tag()) {
    const std::string& tag = update.server_defined_unique_tag();
    target->PutUniqueServerTag(tag);
  }
  if (update.has_client_defined_unique_tag()) {
    const std::string& tag = update.client_defined_unique_tag();
    target->PutUniqueClientTag(tag);
  }
  // Store the datatype-specific part as a protobuf.
  if (update.has_specifics()) {
    DCHECK_NE(GetModelType(update), UNSPECIFIED)
        << "Storing unrecognized datatype in sync database.";
    target->PutServerSpecifics(update.specifics());
  } else if (update.has_bookmarkdata()) {
    // Legacy protocol response for bookmark data.
    const sync_pb::SyncEntity::BookmarkData& bookmark = update.bookmarkdata();
    UpdateBookmarkSpecifics(update.server_defined_unique_tag(),
                            bookmark.bookmark_url(),
                            bookmark.bookmark_favicon(),
                            target);
  }
  if (SyncerProtoUtil::ShouldMaintainPosition(update)) {
    UpdateBookmarkPositioning(update, target);
  }

  target->PutServerIsDel(update.deleted());
  // We only mark the entry as unapplied if its version is greater than the
  // local data. If we're processing the update that corresponds to one of our
  // own commits, we don't apply it, as time differences may occur.
  if (update.version() > target->GetBaseVersion()) {
    target->PutIsUnappliedUpdate(true);
  }
}

// Creates a new Entry iff no Entry exists with the given id.
void CreateNewEntry(syncable::ModelNeutralWriteTransaction *trans,
                    const syncable::Id& id) {
  syncable::Entry entry(trans, GET_BY_ID, id);
  if (!entry.good()) {
    syncable::ModelNeutralMutableEntry new_entry(
        trans,
        syncable::CREATE_NEW_UPDATE_ITEM,
        id);
  }
}

// This function is called on an entry when we can update the user-facing data
// from the server data.
void UpdateLocalDataFromServerData(
    syncable::WriteTransaction* trans,
    syncable::MutableEntry* entry) {
  DCHECK(!entry->GetIsUnsynced());
  DCHECK(entry->GetIsUnappliedUpdate());

  DVLOG(2) << "Updating entry : " << *entry;
  // Start by setting the properties that determine the model_type.
  entry->PutSpecifics(entry->GetServerSpecifics());
  // Clear the previous server specifics now that we're applying successfully.
  entry->PutBaseServerSpecifics(sync_pb::EntitySpecifics());
  entry->PutIsDir(entry->GetServerIsDir());
  // This strange dance around the IS_DEL flag avoids problems when setting
  // the name.
  // TODO(chron): Is this still an issue? Unit test this codepath.
  if (entry->GetServerIsDel()) {
    entry->PutIsDel(true);
  } else {
    entry->PutNonUniqueName(entry->GetServerNonUniqueName());
    entry->PutParentId(entry->GetServerParentId());
    entry->PutUniquePosition(entry->GetServerUniquePosition());
    entry->PutIsDel(false);
  }

  entry->PutCtime(entry->GetServerCtime());
  entry->PutMtime(entry->GetServerMtime());
  entry->PutBaseVersion(entry->GetServerVersion());
  entry->PutIsDel(entry->GetServerIsDel());
  entry->PutIsUnappliedUpdate(false);
}

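// Sanity-checks an unsynced entry before commit.  Self-parenting items,
// unsynced root/permanent items, and locally deleted items the server has
// never seen are reported as VERIFY_UNSYNCABLE so they are not committed.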
VerifyCommitResult ValidateCommitEntry(syncable::Entry* entry) {
  syncable::Id id = entry->GetId();
  if (id == entry->GetParentId()) {
    CHECK(id.IsRoot()) << "Non-root item is self parenting." << *entry;
    // If the root becomes unsynced it can cause us problems.
    LOG(ERROR) << "Root item became unsynced " << *entry;
    return VERIFY_UNSYNCABLE;
  }
  if (entry->IsRoot()) {
    LOG(ERROR) << "Permanent item became unsynced " << *entry;
    return VERIFY_UNSYNCABLE;
  }
  if (entry->GetIsDel() && !entry->GetId().ServerKnows()) {
    // Drop deleted uncommitted entries.
    return VERIFY_UNSYNCABLE;
  }
  return VERIFY_OK;
}

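// Once the folders in |deleted_folders| have had their deletions committed,
// clears IS_UNSYNCED on any remaining deleted unsynced entries whose chain of
// deleted ancestors reaches one of those folders; their own deletions no
// longer need to be committed separately.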
void MarkDeletedChildrenSynced(
    syncable::Directory* dir,
    syncable::BaseWriteTransaction* trans,
    std::set<syncable::Id>* deleted_folders) {
  // There are two options here.
  // 1. Scan deleted unsynced entries looking up their pre-delete tree for any
  //    of the deleted folders.
  // 2. Take each folder and do a tree walk of all entries underneath it.
  // Option 2 has a lower big-O cost, but writing code to limit the time spent
  // inside the transaction during each step is simpler with option 1. Changing
  // this decision may be sensible if this code shows up in profiling.
  if (deleted_folders->empty())
    return;
  Directory::Metahandles handles;
  dir->GetUnsyncedMetaHandles(trans, &handles);
  if (handles.empty())
    return;
  Directory::Metahandles::iterator it;
  for (it = handles.begin(); it != handles.end(); ++it) {
    syncable::ModelNeutralMutableEntry entry(trans, GET_BY_HANDLE, *it);
    if (!entry.GetIsUnsynced() || !entry.GetIsDel())
      continue;
    syncable::Id id = entry.GetParentId();
    while (id != trans->root_id()) {
      if (deleted_folders->find(id) != deleted_folders->end()) {
        // We've synced the deletion of this deleted entry's parent.
        entry.PutIsUnsynced(false);
        break;
      }
      Entry parent(trans, GET_BY_ID, id);
      if (!parent.good() || !parent.GetIsDel())
        break;
      id = parent.GetParentId();
    }
  }
}

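// Verifies an update for which no local entry exists yet.  Deletions of items
// we've never seen are skipped; anything else is accepted as a new entry.
// Returns VERIFY_UNDECIDED when a local entry does exist, so the caller falls
// through to the existing-entry checks.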
VerifyResult VerifyNewEntry(
    const sync_pb::SyncEntity& update,
    syncable::Entry* target,
    const bool deleted) {
  if (target->good()) {
    // Not a new update.
    return VERIFY_UNDECIDED;
  }
  if (deleted) {
    // Deletion of an item we've never seen can be ignored.
    return VERIFY_SKIP;
  }

  return VERIFY_SUCCESS;
}

// Assumes we have an existing entry; check here for updates that break
// consistency rules.
VerifyResult VerifyUpdateConsistency(
    syncable::ModelNeutralWriteTransaction* trans,
    const sync_pb::SyncEntity& update,
    const bool deleted,
    const bool is_directory,
    ModelType model_type,
    syncable::ModelNeutralMutableEntry* target) {

  CHECK(target->good());
  const syncable::Id& update_id = SyncableIdFromProto(update.id_string());

  // If the update is a delete, we don't really need to worry at this stage.
  if (deleted)
    return VERIFY_SUCCESS;

  if (model_type == UNSPECIFIED) {
    // This update is to an item of a datatype we don't recognize. The server
    // shouldn't have sent it to us.  Throw it on the ground.
    return VERIFY_SKIP;
  }

  if (target->GetServerVersion() > 0) {
    // Then we've had an update for this entry before.
    if (is_directory != target->GetServerIsDir() ||
        model_type != target->GetServerModelType()) {
      if (target->GetIsDel()) {  // If we've deleted the item, we don't care.
        return VERIFY_SKIP;
      } else {
        LOG(ERROR) << "Server update doesn't agree with previous updates. ";
        LOG(ERROR) << " Entry: " << *target;
        LOG(ERROR) << " Update: "
                   << SyncerProtoUtil::SyncEntityDebugString(update);
        return VERIFY_FAIL;
      }
    }

    if (!deleted && (target->GetId() == update_id) &&
        (target->GetServerIsDel() ||
         (!target->GetIsUnsynced() && target->GetIsDel() &&
          target->GetBaseVersion() > 0))) {
      // An undelete. The latter case in the above condition is for
      // when the server does not give us an update following the
      // commit of a delete, before undeleting.
      // Undeletion is common for items that reuse the client-unique tag.
      VerifyResult result = VerifyUndelete(trans, update, target);
      if (VERIFY_UNDECIDED != result)
        return result;
    }
  }
  if (target->GetBaseVersion() > 0) {
    // We've committed this update in the past.
    if (is_directory != target->GetIsDir() ||
        model_type != target->GetModelType()) {
      LOG(ERROR) << "Server update doesn't agree with committed item. ";
      LOG(ERROR) << " Entry: " << *target;
      LOG(ERROR) << " Update: "
                 << SyncerProtoUtil::SyncEntityDebugString(update);
      return VERIFY_FAIL;
    }
    if (target->GetId() == update_id) {
      if (target->GetServerVersion() > update.version()) {
        LOG(WARNING) << "We've already seen a more recent version.";
        LOG(WARNING) << " Entry: " << *target;
        LOG(WARNING) << " Update: "
                     << SyncerProtoUtil::SyncEntityDebugString(update);
        return VERIFY_SKIP;
      }
    }
  }
  return VERIFY_SUCCESS;
}

// Assumes we have an existing entry; verify an update that seems to be
// expressing an 'undelete'.
VerifyResult VerifyUndelete(syncable::ModelNeutralWriteTransaction* trans,
                            const sync_pb::SyncEntity& update,
                            syncable::ModelNeutralMutableEntry* target) {
  // TODO(nick): We hit this path for deleted items that the server
  // tells us to re-create; only deleted items with positive base versions
  // will hit this path.  However, it's not clear how such an undeletion
  // would actually succeed on the server; in the protocol, a base
  // version of 0 is required to undelete an object.  This codepath
  // should be deprecated in favor of client-tag style undeletion
  // (where items go to version 0 when they're deleted), or else
  // removed entirely (if this type of undeletion is indeed impossible).
  CHECK(target->good());
  DVLOG(1) << "Server update is attempting undelete. " << *target
           << "Update:" << SyncerProtoUtil::SyncEntityDebugString(update);
  // Move the old one aside and start over.  It's too tricky to get the old one
  // back into a state that would pass CheckTreeInvariants().
  if (target->GetIsDel()) {
    if (target->GetUniqueClientTag().empty())
      LOG(WARNING) << "Doing move-aside undeletion on client-tagged item.";
    target->PutId(trans->directory()->NextId());
    target->PutUniqueClientTag(std::string());
    target->PutBaseVersion(CHANGES_VERSION);
    target->PutServerVersion(0);
    return VERIFY_SUCCESS;
  }
  if (update.version() < target->GetServerVersion()) {
    LOG(WARNING) << "Update older than current server version for "
                 << *target << " Update:"
                 << SyncerProtoUtil::SyncEntityDebugString(update);
    return VERIFY_SUCCESS;  // Expected in new sync protocol.
  }
  return VERIFY_UNDECIDED;
}

}  // namespace syncer