// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/sync/engine/conflict_resolver.h"

#include <algorithm>
#include <map>
#include <set>
#include <sstream>

#include "chrome/browser/sync/engine/syncer.h"
#include "chrome/browser/sync/engine/syncer_util.h"
#include "chrome/browser/sync/protocol/service_constants.h"
#include "chrome/browser/sync/sessions/status_controller.h"
#include "chrome/browser/sync/syncable/directory_manager.h"
#include "chrome/browser/sync/syncable/syncable.h"
#include "chrome/common/deprecated/event_sys-inl.h"

using std::map;
using std::set;
using syncable::BaseTransaction;
using syncable::Directory;
using syncable::Entry;
using syncable::Id;
using syncable::MutableEntry;
using syncable::ScopedDirLookup;
using syncable::WriteTransaction;

namespace browser_sync {

using sessions::ConflictProgress;
using sessions::StatusController;

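// Threshold on the per-item and per-set conflict counts kept below; once a
// count reaches this value and there are no server changes left to download,
// LogAndSignalIfConflictStuck marks the syncer as stuck.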
const int SYNC_CYCLES_BEFORE_ADMITTING_DEFEAT = 8;

ConflictResolver::ConflictResolver() {
}

ConflictResolver::~ConflictResolver() {
}

void ConflictResolver::IgnoreLocalChanges(MutableEntry* entry) {
  // An update matches local actions, so merge the changes.
  // This is a little fishy because we don't actually merge them.
  // In the future we should do a 3-way merge.
  VLOG(1) << "Server and local changes match, merging:" << entry;
  // With IS_UNSYNCED false, changes should be merged.
  // METRIC simple conflict resolved by merge.
  entry->Put(syncable::IS_UNSYNCED, false);
}

void ConflictResolver::OverwriteServerChanges(WriteTransaction* trans,
                                              MutableEntry* entry) {
  // This is similar to an overwrite from the old client.
  // This is equivalent to a scenario where we got the update before we'd
  // made our local client changes.
  // TODO(chron): This is really a general property clobber. We clobber
  // the server side property. Perhaps we should actually do property merging.
  entry->Put(syncable::BASE_VERSION, entry->Get(syncable::SERVER_VERSION));
  entry->Put(syncable::IS_UNAPPLIED_UPDATE, false);
  // METRIC conflict resolved by overwrite.
}

ConflictResolver::ProcessSimpleConflictResult
    ConflictResolver::ProcessSimpleConflict(WriteTransaction* trans,
                                            const Id& id) {
  MutableEntry entry(trans, syncable::GET_BY_ID, id);
  // Must be good as the entry won't have been cleaned up.
  CHECK(entry.good());
  // If an update fails, locally we have to be in a set or unsynced. We're not
  // in a set here, so we must be unsynced.
  if (!entry.Get(syncable::IS_UNSYNCED)) {
    return NO_SYNC_PROGRESS;
  }

  if (!entry.Get(syncable::IS_UNAPPLIED_UPDATE)) {
    if (!entry.Get(syncable::PARENT_ID).ServerKnows()) {
      VLOG(1) << "Item conflicting because its parent not yet committed. Id: "
              << id;
    } else {
      VLOG(1) << "No set for conflicting entry id " << id << ". There should "
                 "be an update/commit that will fix this soon. This message "
                 "should not repeat.";
    }
    return NO_SYNC_PROGRESS;
  }

  if (entry.Get(syncable::IS_DEL) && entry.Get(syncable::SERVER_IS_DEL)) {
    // Both sides have deleted it, so just drop the need to commit/update this
    // entry.
    entry.Put(syncable::IS_UNSYNCED, false);
    entry.Put(syncable::IS_UNAPPLIED_UPDATE, false);
    // We've made changes, but they won't help syncing progress.
    // METRIC simple conflict resolved by merge.
    return NO_SYNC_PROGRESS;
  }

  if (!entry.Get(syncable::SERVER_IS_DEL)) {
    // This logic determines "client wins" vs. "server wins" strategy picking.
    // TODO(nick): The current logic is arbitrary; instead, it ought to be made
    // consistent with the ModelAssociator behavior for a datatype. It would
    // be nice if we could route this back to ModelAssociator code to pick one
    // of three options: CLIENT, SERVER, or MERGE. Some datatypes (autofill)
    // are easily mergeable.
    bool name_matches = entry.Get(syncable::NON_UNIQUE_NAME) ==
        entry.Get(syncable::SERVER_NON_UNIQUE_NAME);
    bool parent_matches = entry.Get(syncable::PARENT_ID) ==
        entry.Get(syncable::SERVER_PARENT_ID);
    bool entry_deleted = entry.Get(syncable::IS_DEL);

    if (!entry_deleted && name_matches && parent_matches) {
      VLOG(1) << "Resolving simple conflict, ignoring local changes for:"
              << entry;
      IgnoreLocalChanges(&entry);
    } else {
      VLOG(1) << "Resolving simple conflict, overwriting server changes for:"
              << entry;
      OverwriteServerChanges(trans, &entry);
    }
    return SYNC_PROGRESS;
  } else {  // SERVER_IS_DEL is true
    // If a server-deleted folder has local contents, we should be in a set.
    if (entry.Get(syncable::IS_DIR)) {
      Directory::ChildHandles children;
      trans->directory()->GetChildHandles(trans,
                                          entry.Get(syncable::ID),
                                          &children);
      if (0 != children.size()) {
        VLOG(1) << "Entry is a server-deleted directory with local contents; "
                   "it should be in a set (race condition).";
        return NO_SYNC_PROGRESS;
      }
    }

    // The entry is deleted on the server but still exists locally.
    if (!entry.Get(syncable::UNIQUE_CLIENT_TAG).empty()) {
      // If we've got a client-unique tag, we can undelete while retaining
      // our present ID.
      DCHECK_EQ(entry.Get(syncable::SERVER_VERSION), 0) << "For the server to "
          "know to re-create, client-tagged items should revert to version 0 "
          "when server-deleted.";
      OverwriteServerChanges(trans, &entry);
      // Clobber the versions, just in case the above DCHECK is violated.
      entry.Put(syncable::SERVER_VERSION, 0);
      entry.Put(syncable::BASE_VERSION, 0);
    } else {
      // Otherwise, we've got to undelete by creating a new locally
      // uncommitted entry.
      SyncerUtil::SplitServerInformationIntoNewEntry(trans, &entry);

      MutableEntry server_update(trans, syncable::GET_BY_ID, id);
      CHECK(server_update.good());
      CHECK(server_update.Get(syncable::META_HANDLE) !=
            entry.Get(syncable::META_HANDLE))
          << server_update << entry;
    }
    return SYNC_PROGRESS;
  }
}

ConflictResolver::ConflictSetCountMapKey ConflictResolver::GetSetKey(
    ConflictSet* set) {
  // TODO(sync): Come up with a better scheme for set hashing. This scheme
  // will make debugging easy.
  // If this call to sort is removed, we need to add one before we use
  // binary_search in ProcessConflictSet.
  sort(set->begin(), set->end());
  std::stringstream rv;
  for (ConflictSet::iterator i = set->begin(); i != set->end(); ++i)
    rv << *i << ".";
  return rv.str();
}

namespace {

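// Looks for a directory in the conflict set whose unapplied server update
// would reparent it under one of its own descendants, creating a loop in the
// local tree. If one is found, the server's position is discarded in favor of
// the local one and the entry is marked unsynced so the local position gets
// committed.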
bool AttemptToFixCircularConflict(WriteTransaction* trans,
                                  ConflictSet* conflict_set) {
  ConflictSet::const_iterator i;
  for (i = conflict_set->begin(); i != conflict_set->end(); ++i) {
    MutableEntry entryi(trans, syncable::GET_BY_ID, *i);
    if (entryi.Get(syncable::PARENT_ID) ==
            entryi.Get(syncable::SERVER_PARENT_ID) ||
        !entryi.Get(syncable::IS_UNAPPLIED_UPDATE) ||
        !entryi.Get(syncable::IS_DIR)) {
      continue;
    }
    Id parentid = entryi.Get(syncable::SERVER_PARENT_ID);
    // Create the entry here as it's the only place we could ever get a
    // parentid that doesn't correspond to a real entry.
    Entry parent(trans, syncable::GET_BY_ID, parentid);
    if (!parent.good())  // Server parent update not received yet.
      continue;
    // This loop walks upwards from the server parent. If we hit the root (0)
    // all is well. If we hit the entry we're examining it means applying the
    // parent id would cause a loop. We don't need more general loop detection
    // because we know our local tree is valid.
    while (!parentid.IsRoot()) {
      Entry parent(trans, syncable::GET_BY_ID, parentid);
      CHECK(parent.good());
      if (parentid == *i)
        break;  // It's a loop.
      parentid = parent.Get(syncable::PARENT_ID);
    }
    if (parentid.IsRoot())
      continue;
    VLOG(1) << "Overwriting server changes to avoid loop: " << entryi;
    entryi.Put(syncable::BASE_VERSION, entryi.Get(syncable::SERVER_VERSION));
    entryi.Put(syncable::IS_UNSYNCED, true);
    entryi.Put(syncable::IS_UNAPPLIED_UPDATE, false);
    // METRIC conflict resolved by breaking dir loop.
    return true;
  }
  return false;
}

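// Handles an unsynced entry whose parent directory (and possibly further
// ancestors in the conflict set) has been deleted on the server: the deleted
// directories are given fresh IDs and marked unsynced so they can be
// recreated, letting the local changes underneath them be committed.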
bool AttemptToFixUnsyncedEntryInDeletedServerTree(WriteTransaction* trans,
                                                  ConflictSet* conflict_set,
                                                  const Entry& entry) {
  if (!entry.Get(syncable::IS_UNSYNCED) || entry.Get(syncable::IS_DEL))
    return false;
  Id parentid = entry.Get(syncable::PARENT_ID);
  MutableEntry parent(trans, syncable::GET_BY_ID, parentid);
  if (!parent.good() || !parent.Get(syncable::IS_UNAPPLIED_UPDATE) ||
      !parent.Get(syncable::SERVER_IS_DEL) ||
      !binary_search(conflict_set->begin(), conflict_set->end(), parentid))
    return false;
  // We're trying to commit into a directory tree that's been deleted. To
  // solve this we recreate the directory tree.
  //
  // We do this in two parts: first, we ensure the tree is unaltered since the
  // conflict was detected.
  Id id = parentid;
  while (!id.IsRoot()) {
    if (!binary_search(conflict_set->begin(), conflict_set->end(), id))
      break;
    Entry parent(trans, syncable::GET_BY_ID, id);
    if (!parent.good() || !parent.Get(syncable::IS_UNAPPLIED_UPDATE) ||
        !parent.Get(syncable::SERVER_IS_DEL))
      return false;
    id = parent.Get(syncable::PARENT_ID);
  }
  // Now we fix up the entries.
  id = parentid;
  while (!id.IsRoot()) {
    MutableEntry parent(trans, syncable::GET_BY_ID, id);
    if (!binary_search(conflict_set->begin(), conflict_set->end(), id))
      break;
    VLOG(1) << "Giving directory a new id so we can undelete it " << parent;
    ClearServerData(&parent);
    SyncerUtil::ChangeEntryIDAndUpdateChildren(trans, &parent,
                                               trans->directory()->NextId());
    parent.Put(syncable::BASE_VERSION, 0);
    parent.Put(syncable::IS_UNSYNCED, true);
    id = parent.Get(syncable::PARENT_ID);
    // METRIC conflict resolved by recreating dir tree.
  }
  return true;
}

// TODO(chron): needs unit test badly
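// Handles an unapplied server update whose server-side parent directory (and
// possibly further ancestors in the conflict set) has been deleted locally but
// not yet committed: the locally deleted directories are undeleted,
// re-rooting part of the path at the root if the chain is broken or loops, so
// that the update can be applied.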
bool AttemptToFixUpdateEntryInDeletedLocalTree(WriteTransaction* trans,
                                               ConflictSet* conflict_set,
                                               const Entry& entry) {
  if (!entry.Get(syncable::IS_UNAPPLIED_UPDATE) ||
      entry.Get(syncable::SERVER_IS_DEL))
    return false;
  Id parent_id = entry.Get(syncable::SERVER_PARENT_ID);
  MutableEntry parent(trans, syncable::GET_BY_ID, parent_id);
  if (!parent.good() || !parent.Get(syncable::IS_DEL) ||
      !binary_search(conflict_set->begin(), conflict_set->end(), parent_id)) {
    return false;
  }
  // We've deleted a directory tree that's got contents on the server. We
  // recreate the directory to solve the problem.
  //
  // We do this in two parts: first, we ensure the tree is unaltered since
  // the conflict was detected.
  Id id = parent_id;
  // As we will be crawling the path of deleted entries, there's a chance
  // we'll end up having to reparent an item because its parent is invalid.
  Id reroot_id = syncable::kNullId;
  // Similarly, crawling deleted items means we risk loops.
  int loop_detection = conflict_set->size();
  while (!id.IsRoot() && --loop_detection >= 0) {
    Entry parent(trans, syncable::GET_BY_ID, id);
    // If we get a bad parent, or a parent that's deleted on both client and
    // server, we recreate the hierarchy in the root.
    if (!parent.good()) {
      reroot_id = id;
      break;
    }
    CHECK(parent.Get(syncable::IS_DIR));
    if (!binary_search(conflict_set->begin(), conflict_set->end(), id)) {
      // We've reached an entry that's not in the set. If it has been deleted
      // between set building and this point in time, we return false. If it
      // had been deleted earlier, it would have been in the set.
      // TODO(sync): Revisit syncer code organization to see if conflict
      // resolution can be done in the same transaction as set building.
      if (parent.Get(syncable::IS_DEL))
        return false;
      break;
    }
    if (!parent.Get(syncable::IS_DEL) ||
        parent.Get(syncable::SERVER_IS_DEL) ||
        !parent.Get(syncable::IS_UNSYNCED)) {
      return false;
    }
    id = parent.Get(syncable::PARENT_ID);
  }
  // If we find we've been looping, we re-root the hierarchy.
  if (loop_detection < 0) {
    if (id == entry.Get(syncable::ID))
      reroot_id = entry.Get(syncable::PARENT_ID);
    else
      reroot_id = id;
  }
  // Now we fix things up by undeleting all the folders in the item's path.
  id = parent_id;
  while (!id.IsRoot() && id != reroot_id) {
    if (!binary_search(conflict_set->begin(), conflict_set->end(), id)) {
      break;
    }
    MutableEntry entry(trans, syncable::GET_BY_ID, id);

    VLOG(1) << "Undoing our deletion of " << entry
            << ", will have name " << entry.Get(syncable::NON_UNIQUE_NAME);

    Id parent_id = entry.Get(syncable::PARENT_ID);
    if (parent_id == reroot_id) {
      parent_id = trans->root_id();
    }
    entry.Put(syncable::PARENT_ID, parent_id);
    entry.Put(syncable::IS_DEL, false);
    id = entry.Get(syncable::PARENT_ID);
    // METRIC conflict resolved by recreating dir tree.
  }
  return true;
}

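// Tries both of the removed-directory fixups above on each entry in the set,
// stopping as soon as one of them makes progress.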
bool AttemptToFixRemovedDirectoriesWithContent(WriteTransaction* trans,
                                               ConflictSet* conflict_set) {
  ConflictSet::const_iterator i;
  for (i = conflict_set->begin(); i != conflict_set->end(); ++i) {
    Entry entry(trans, syncable::GET_BY_ID, *i);
    if (AttemptToFixUnsyncedEntryInDeletedServerTree(trans,
                                                     conflict_set, entry)) {
      return true;
    }
    if (AttemptToFixUpdateEntryInDeletedLocalTree(trans, conflict_set, entry))
      return true;
  }
  return false;
}

}  // namespace

// TODO(sync): Eliminate conflict sets. They're not necessary.
bool ConflictResolver::ProcessConflictSet(WriteTransaction* trans,
                                          ConflictSet* conflict_set,
                                          int conflict_count) {
  int set_size = conflict_set->size();
  if (set_size < 2) {
    LOG(WARNING) << "Skipping conflict set because it has size " << set_size;
    // We can end up with sets of size one if we have a new item in a set that
    // we tried to commit transactionally. This should not be a persistent
    // situation.
    return false;
  }
  if (conflict_count < 3) {
    // Avoid resolving sets that could be the result of transient conflicts.
    // Transient conflicts can occur because the client or server can be
    // slightly out of date.
    return false;
  }

  VLOG(1) << "Fixing a set containing " << set_size << " items";

  // Fix circular conflicts.
  if (AttemptToFixCircularConflict(trans, conflict_set))
    return true;
  // Check for problems involving contents of removed folders.
  if (AttemptToFixRemovedDirectoriesWithContent(trans, conflict_set))
    return true;
  return false;
}

template <typename InputIt>
bool ConflictResolver::LogAndSignalIfConflictStuck(
    BaseTransaction* trans,
    int attempt_count,
    InputIt begin,
    InputIt end,
    StatusController* status) {
  if (attempt_count < SYNC_CYCLES_BEFORE_ADMITTING_DEFEAT) {
    return false;
  }

  // Don't signal stuck if we're not up to date.
  if (status->num_server_changes_remaining() > 0) {
    return false;
  }

  LOG(ERROR) << "[BUG] Conflict set cannot be resolved, has "
             << end - begin << " items:";
  for (InputIt i = begin; i != end; ++i) {
    Entry e(trans, syncable::GET_BY_ID, *i);
    if (e.good())
      LOG(ERROR) << "  " << e;
    else
      LOG(ERROR) << "  Bad ID:" << *i;
  }

  status->set_syncer_stuck(true);

  return true;
  // TODO(sync): If we're stuck for a while we need to alert the user, clear
  // cache or reset syncing. At the very least we should stop trying something
  // that's obviously not working.
}

bool ConflictResolver::ResolveSimpleConflicts(const ScopedDirLookup& dir,
                                              StatusController* status) {
  WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
  bool forward_progress = false;
  const ConflictProgress& progress = status->conflict_progress();
  // First iterate over simple conflict items (those that belong to no set).
  set<Id>::const_iterator conflicting_item_it;
  for (conflicting_item_it = progress.ConflictingItemsBeginConst();
       conflicting_item_it != progress.ConflictingItemsEnd();
       ++conflicting_item_it) {
    Id id = *conflicting_item_it;
    map<Id, ConflictSet*>::const_iterator item_set_it =
        progress.IdToConflictSetFind(id);
    if (item_set_it == progress.IdToConflictSetEnd() ||
        0 == item_set_it->second) {
      // We have a simple conflict.
      switch (ProcessSimpleConflict(&trans, id)) {
        case NO_SYNC_PROGRESS:
          {
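            // Each round a conflict is seen, its count is bumped by 2 here
            // and decayed by 1 below, for a net gain of one per sync cycle
            // while the conflict persists.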
            int conflict_count = (simple_conflict_count_map_[id] += 2);
            LogAndSignalIfConflictStuck(&trans, conflict_count,
                                        &id, &id + 1, status);
            break;
          }
        case SYNC_PROGRESS:
          forward_progress = true;
          break;
      }
    }
  }
  // Reduce the simple_conflict_count for each item currently tracked.
  SimpleConflictCountMap::iterator i = simple_conflict_count_map_.begin();
  while (i != simple_conflict_count_map_.end()) {
    if (0 == --(i->second))
      simple_conflict_count_map_.erase(i++);
    else
      ++i;
  }
  return forward_progress;
}

bool ConflictResolver::ResolveConflicts(const ScopedDirLookup& dir,
                                        StatusController* status) {
  const ConflictProgress& progress = status->conflict_progress();
  bool rv = false;
  if (ResolveSimpleConflicts(dir, status))
    rv = true;
  WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
  set<Id> children_of_dirs_merged_last_round;
  std::swap(children_of_merged_dirs_, children_of_dirs_merged_last_round);
  set<ConflictSet*>::const_iterator set_it;
  for (set_it = progress.ConflictSetsBegin();
       set_it != progress.ConflictSetsEnd();
       ++set_it) {
    ConflictSet* conflict_set = *set_it;
    ConflictSetCountMapKey key = GetSetKey(conflict_set);
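    // Same bookkeeping as for simple conflicts: the count grows by a net 1
    // for each cycle the set remains in conflict and decays once it is gone.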
    conflict_set_count_map_[key] += 2;
    int conflict_count = conflict_set_count_map_[key];
    // Keep a metric for new sets.
    if (2 == conflict_count) {
      // METRIC conflict sets seen ++
    }
    // See if this set contains entries whose parents were merged last round.
    if (SortedCollectionsIntersect(children_of_dirs_merged_last_round.begin(),
                                   children_of_dirs_merged_last_round.end(),
                                   conflict_set->begin(),
                                   conflict_set->end())) {
      VLOG(1) << "Accelerating resolution for hierarchical merge.";
      conflict_count += 2;
    }
    // See if we should process this set.
    if (ProcessConflictSet(&trans, conflict_set, conflict_count)) {
      rv = true;
    }
    LogAndSignalIfConflictStuck(&trans, conflict_count,
                                conflict_set->begin(),
                                conflict_set->end(), status);
  }
  if (rv) {
    // This means we don't signal that syncing is stuck when any conflict
    // resolution has occurred.
    // TODO(sync): As this also reduces our sensitivity to problem conditions
    // and increases the time for cascading resolutions, we may have to revisit
    // this code later, doing something more intelligent.
    conflict_set_count_map_.clear();
    simple_conflict_count_map_.clear();
  }
  ConflictSetCountMap::iterator i = conflict_set_count_map_.begin();
  while (i != conflict_set_count_map_.end()) {
    if (0 == --i->second) {
      conflict_set_count_map_.erase(i++);
      // METRIC self resolved conflict sets ++.
    } else {
      ++i;
    }
  }
  return rv;
}

}  // namespace browser_sync