1 // Copyright 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "sync/syncable/directory_backing_store.h"
6
7 #include "build/build_config.h"
8
9 #include <limits>
10
11 #include "base/base64.h"
12 #include "base/debug/trace_event.h"
13 #include "base/logging.h"
14 #include "base/rand_util.h"
15 #include "base/strings/stringprintf.h"
16 #include "base/time/time.h"
17 #include "sql/connection.h"
18 #include "sql/statement.h"
19 #include "sql/transaction.h"
20 #include "sync/internal_api/public/base/node_ordinal.h"
21 #include "sync/protocol/bookmark_specifics.pb.h"
22 #include "sync/protocol/sync.pb.h"
23 #include "sync/syncable/syncable-inl.h"
24 #include "sync/syncable/syncable_columns.h"
25 #include "sync/syncable/syncable_util.h"
26 #include "sync/util/time.h"
27
28 using std::string;
29
30 namespace syncer {
31 namespace syncable {
32
// This just has to be big enough to hold an UPDATE or INSERT statement that
// modifies all the columns in the entry table.
static const string::size_type kUpdateStatementBufferSize = 2048;

// Increment this version whenever updating DB tables.
// See InitializeTables() for the step-by-step migration ladder that brings
// older databases up to this version.
const int32 kCurrentDBVersion = 88;
39
40 // Iterate over the fields of |entry| and bind each to |statement| for
41 // updating. Returns the number of args bound.
BindFields(const EntryKernel & entry,sql::Statement * statement)42 void BindFields(const EntryKernel& entry,
43 sql::Statement* statement) {
44 int index = 0;
45 int i = 0;
46 for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
47 statement->BindInt64(index++, entry.ref(static_cast<Int64Field>(i)));
48 }
49 for ( ; i < TIME_FIELDS_END; ++i) {
50 statement->BindInt64(index++,
51 TimeToProtoTime(
52 entry.ref(static_cast<TimeField>(i))));
53 }
54 for ( ; i < ID_FIELDS_END; ++i) {
55 statement->BindString(index++, entry.ref(static_cast<IdField>(i)).s_);
56 }
57 for ( ; i < BIT_FIELDS_END; ++i) {
58 statement->BindInt(index++, entry.ref(static_cast<BitField>(i)));
59 }
60 for ( ; i < STRING_FIELDS_END; ++i) {
61 statement->BindString(index++, entry.ref(static_cast<StringField>(i)));
62 }
63 for ( ; i < PROTO_FIELDS_END; ++i) {
64 std::string temp;
65 entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
66 statement->BindBlob(index++, temp.data(), temp.length());
67 }
68 for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
69 std::string temp;
70 entry.ref(static_cast<UniquePositionField>(i)).SerializeToString(&temp);
71 statement->BindBlob(index++, temp.data(), temp.length());
72 }
73 for (; i < ATTACHMENT_METADATA_FIELDS_END; ++i) {
74 std::string temp;
75 entry.ref(static_cast<AttachmentMetadataField>(i)).SerializeToString(&temp);
76 statement->BindBlob(index++, temp.data(), temp.length());
77 }
78 }
79
80 // The caller owns the returned EntryKernel*. Assumes the statement currently
81 // points to a valid row in the metas table. Returns NULL to indicate that
82 // it detected a corruption in the data on unpacking.
scoped_ptr<EntryKernel> UnpackEntry(sql::Statement* statement) {
  scoped_ptr<EntryKernel> kernel(new EntryKernel());
  DCHECK_EQ(statement->ColumnCount(), static_cast<int>(FIELD_COUNT));
  // Columns are read in fixed field-enum order (the same order BindFields
  // writes them); |i| advances through the contiguous enum ranges, so each
  // loop picks up exactly where the previous one ended.
  int i = 0;
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    kernel->put(static_cast<Int64Field>(i), statement->ColumnInt64(i));
  }
  for ( ; i < TIME_FIELDS_END; ++i) {
    // Times are stored as proto-time integers; convert back to base::Time.
    kernel->put(static_cast<TimeField>(i),
                ProtoTimeToTime(statement->ColumnInt64(i)));
  }
  for ( ; i < ID_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<IdField>(i)).s_ =
        statement->ColumnString(i);
  }
  for ( ; i < BIT_FIELDS_END; ++i) {
    // Bits are stored as ints; any non-zero value means true.
    kernel->put(static_cast<BitField>(i), (0 != statement->ColumnInt(i)));
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    kernel->put(static_cast<StringField>(i),
                statement->ColumnString(i));
  }
  for ( ; i < PROTO_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
        statement->ColumnBlob(i), statement->ColumnByteLength(i));
  }
  for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
    std::string temp;
    statement->ColumnBlobAsString(i, &temp);

    sync_pb::UniquePosition proto;
    if (!proto.ParseFromString(temp)) {
      // An unparseable position blob is treated as corruption; the caller
      // sees a null return and can abandon the load.
      DVLOG(1) << "Unpacked invalid position.  Assuming the DB is corrupt";
      return scoped_ptr<EntryKernel>();
    }

    kernel->mutable_ref(static_cast<UniquePositionField>(i)) =
        UniquePosition::FromProto(proto);
  }
  for (; i < ATTACHMENT_METADATA_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<AttachmentMetadataField>(i)).ParseFromArray(
        statement->ColumnBlob(i), statement->ColumnByteLength(i));
  }

  // Sanity check on positions.  We risk strange and rare crashes if our
  // assumptions about unique position values are broken.
  if (kernel->ShouldMaintainPosition() &&
      !kernel->ref(UNIQUE_POSITION).IsValid()) {
    DVLOG(1) << "Unpacked invalid position on an entity that should have a "
             << "valid position.  Assuming the DB is corrupt.";
    return scoped_ptr<EntryKernel>();
  }

  return kernel.Pass();
}
138
139 namespace {
140
ComposeCreateTableColumnSpecs()141 string ComposeCreateTableColumnSpecs() {
142 const ColumnSpec* begin = g_metas_columns;
143 const ColumnSpec* end = g_metas_columns + arraysize(g_metas_columns);
144 string query;
145 query.reserve(kUpdateStatementBufferSize);
146 char separator = '(';
147 for (const ColumnSpec* column = begin; column != end; ++column) {
148 query.push_back(separator);
149 separator = ',';
150 query.append(column->name);
151 query.push_back(' ');
152 query.append(column->spec);
153 }
154 query.push_back(')');
155 return query;
156 }
157
AppendColumnList(std::string * output)158 void AppendColumnList(std::string* output) {
159 const char* joiner = " ";
160 // Be explicit in SELECT order to match up with UnpackEntry.
161 for (int i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
162 output->append(joiner);
163 output->append(ColumnName(i));
164 joiner = ", ";
165 }
166 }
167
168 } // namespace
169
170 ///////////////////////////////////////////////////////////////////////////////
171 // DirectoryBackingStore implementation.
172
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name)
    : db_(new sql::Connection()),
      dir_name_(dir_name),
      needs_column_refresh_(false) {
  // Tag the connection for diagnostics and tune its page/cache sizes for
  // the sync directory workload.
  db_->set_histogram_tag("SyncDirectory");
  db_->set_page_size(4096);
  db_->set_cache_size(32);
}
181
// Variant that adopts an externally-constructed connection as-is (no
// page-size/cache tuning is applied here) — presumably for injecting a
// pre-configured or in-memory connection; verify against callers.
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
                                             sql::Connection* db)
    : db_(db),
      dir_name_(dir_name),
      needs_column_refresh_(false) {
}
188
DirectoryBackingStore::~DirectoryBackingStore() {
  // Nothing to do explicitly; |db_|'s owning pointer closes the connection.
}
191
DeleteEntries(EntryTable from,const MetahandleSet & handles)192 bool DirectoryBackingStore::DeleteEntries(EntryTable from,
193 const MetahandleSet& handles) {
194 if (handles.empty())
195 return true;
196
197 sql::Statement statement;
198 // Call GetCachedStatement() separately to get different statements for
199 // different tables.
200 switch (from) {
201 case METAS_TABLE:
202 statement.Assign(db_->GetCachedStatement(
203 SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?"));
204 break;
205 case DELETE_JOURNAL_TABLE:
206 statement.Assign(db_->GetCachedStatement(
207 SQL_FROM_HERE, "DELETE FROM deleted_metas WHERE metahandle = ?"));
208 break;
209 }
210
211 for (MetahandleSet::const_iterator i = handles.begin(); i != handles.end();
212 ++i) {
213 statement.BindInt64(0, *i);
214 if (!statement.Run())
215 return false;
216 statement.Reset(true);
217 }
218 return true;
219 }
220
// Persists a snapshot of dirty directory state: dirty metas, purged
// metahandles, delete journals, and (when flagged dirty) the share_info and
// per-model-type rows.  Everything is written inside a single transaction so
// a failure leaves the database untouched.
bool DirectoryBackingStore::SaveChanges(
    const Directory::SaveChangesSnapshot& snapshot) {
  DCHECK(CalledOnValidThread());
  DCHECK(db_->is_open());

  // Back out early if there is nothing to write.
  bool save_info =
      (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
  if (snapshot.dirty_metas.empty() && snapshot.metahandles_to_purge.empty() &&
      snapshot.delete_journals.empty() &&
      snapshot.delete_journals_to_purge.empty() && !save_info) {
    return true;
  }

  sql::Transaction transaction(db_.get());
  if (!transaction.Begin())
    return false;

  // Upsert every dirty entry into the metas table.
  PrepareSaveEntryStatement(METAS_TABLE, &save_meta_statment_);
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    DCHECK((*i)->is_dirty());
    if (!SaveEntryToDB(&save_meta_statment_, **i))
      return false;
  }

  if (!DeleteEntries(METAS_TABLE, snapshot.metahandles_to_purge))
    return false;

  // Same pattern for the delete-journal table.
  PrepareSaveEntryStatement(DELETE_JOURNAL_TABLE,
                            &save_delete_journal_statment_);
  for (EntryKernelSet::const_iterator i = snapshot.delete_journals.begin();
       i != snapshot.delete_journals.end(); ++i) {
    if (!SaveEntryToDB(&save_delete_journal_statment_, **i))
      return false;
  }

  if (!DeleteEntries(DELETE_JOURNAL_TABLE, snapshot.delete_journals_to_purge))
    return false;

  if (save_info) {
    const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
    // share_info is updated in place (no WHERE clause; the table is expected
    // to hold exactly one row — see the DCHECK on the change count below).
    sql::Statement s1(db_->GetCachedStatement(
            SQL_FROM_HERE,
            "UPDATE share_info "
            "SET store_birthday = ?, "
            "next_id = ?, "
            "bag_of_chips = ?"));
    s1.BindString(0, info.store_birthday);
    s1.BindInt64(1, info.next_id);
    s1.BindBlob(2, info.bag_of_chips.data(), info.bag_of_chips.size());

    if (!s1.Run())
      return false;
    DCHECK_EQ(db_->GetLastChangeCount(), 1);

    // One models row per protocol type, keyed by the protobuf-derived
    // model ID.
    sql::Statement s2(db_->GetCachedStatement(
            SQL_FROM_HERE,
            "INSERT OR REPLACE "
            "INTO models (model_id, "
            "progress_marker, "
            "transaction_version, "
            "context) "
            "VALUES (?, ?, ?, ?)"));

    ModelTypeSet protocol_types = ProtocolTypes();
    for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
         iter.Inc()) {
      ModelType type = iter.Get();
      // We persist not ModelType but rather a protobuf-derived ID.
      string model_id = ModelTypeEnumToModelId(type);
      string progress_marker;
      info.download_progress[type].SerializeToString(&progress_marker);
      s2.BindBlob(0, model_id.data(), model_id.length());
      s2.BindBlob(1, progress_marker.data(), progress_marker.length());
      s2.BindInt64(2, info.transaction_version[type]);
      string context;
      info.datatype_context[type].SerializeToString(&context);
      s2.BindBlob(3, context.data(), context.length());
      if (!s2.Run())
        return false;
      DCHECK_EQ(db_->GetLastChangeCount(), 1);
      s2.Reset(true);
    }
  }

  return transaction.Commit();
}
309
// Opens or upgrades the database schema.  Reads the on-disk version and walks
// it forward one migration at a time until it reaches kCurrentDBVersion; a
// database that cannot be migrated (or is from a future version... which
// fails outright) is dropped and recreated from scratch, forcing a full
// re-sync.  The whole process runs in one transaction.
bool DirectoryBackingStore::InitializeTables() {
  sql::Transaction transaction(db_.get());
  if (!transaction.Begin())
    return false;

  int version_on_disk = GetVersion();

  // Upgrade from version 67. Version 67 was widely distributed as the original
  // Bookmark Sync release. Version 68 removed unique naming.
  if (version_on_disk == 67) {
    if (MigrateVersion67To68())
      version_on_disk = 68;
  }
  // Version 69 introduced additional datatypes.
  if (version_on_disk == 68) {
    if (MigrateVersion68To69())
      version_on_disk = 69;
  }

  if (version_on_disk == 69) {
    if (MigrateVersion69To70())
      version_on_disk = 70;
  }

  // Version 71 changed the sync progress information to be per-datatype.
  if (version_on_disk == 70) {
    if (MigrateVersion70To71())
      version_on_disk = 71;
  }

  // Version 72 removed extended attributes, a legacy way to do extensible
  // key/value information, stored in their own table.
  if (version_on_disk == 71) {
    if (MigrateVersion71To72())
      version_on_disk = 72;
  }

  // Version 73 added a field for notification state.
  if (version_on_disk == 72) {
    if (MigrateVersion72To73())
      version_on_disk = 73;
  }

  // Version 74 added state for the autofill migration.
  if (version_on_disk == 73) {
    if (MigrateVersion73To74())
      version_on_disk = 74;
  }

  // Version 75 migrated from int64-based timestamps to per-datatype tokens.
  if (version_on_disk == 74) {
    if (MigrateVersion74To75())
      version_on_disk = 75;
  }

  // Version 76 removed all (5) autofill migration related columns.
  if (version_on_disk == 75) {
    if (MigrateVersion75To76())
      version_on_disk = 76;
  }

  // Version 77 standardized all time fields to ms since the Unix
  // epoch.
  if (version_on_disk == 76) {
    if (MigrateVersion76To77())
      version_on_disk = 77;
  }

  // Version 78 added the column base_server_specifics to the metas table.
  if (version_on_disk == 77) {
    if (MigrateVersion77To78())
      version_on_disk = 78;
  }

  // Version 79 migration is a one-time fix for some users in a bad state.
  if (version_on_disk == 78) {
    if (MigrateVersion78To79())
      version_on_disk = 79;
  }

  // Version 80 migration is adding the bag_of_chips column.
  if (version_on_disk == 79) {
    if (MigrateVersion79To80())
      version_on_disk = 80;
  }

  // Version 81 replaces the int64 server_position_in_parent_field
  // with a blob server_ordinal_in_parent field.
  if (version_on_disk == 80) {
    if (MigrateVersion80To81())
      version_on_disk = 81;
  }

  // Version 82 migration added transaction_version column per data type.
  if (version_on_disk == 81) {
    if (MigrateVersion81To82())
      version_on_disk = 82;
  }

  // Version 83 migration added transaction_version column per sync entry.
  if (version_on_disk == 82) {
    if (MigrateVersion82To83())
      version_on_disk = 83;
  }

  // Version 84 migration added deleted_metas table.
  if (version_on_disk == 83) {
    if (MigrateVersion83To84())
      version_on_disk = 84;
  }

  // Version 85 migration removes the initial_sync_ended bits.
  if (version_on_disk == 84) {
    if (MigrateVersion84To85())
      version_on_disk = 85;
  }

  // Version 86 migration converts bookmarks to the unique positioning system.
  // It also introduces a new field to store a unique ID for each bookmark.
  if (version_on_disk == 85) {
    if (MigrateVersion85To86())
      version_on_disk = 86;
  }

  // Version 87 migration adds a collection of attachment ids per sync entry.
  if (version_on_disk == 86) {
    if (MigrateVersion86To87())
      version_on_disk = 87;
  }

  // Version 88 migration adds datatype contexts to the models table.
  if (version_on_disk == 87) {
    if (MigrateVersion87To88())
      version_on_disk = 88;
  }

  // If one of the migrations requested it, drop columns that aren't current.
  // It's only safe to do this after migrating all the way to the current
  // version.
  if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) {
    // On failure, force the drop-and-recreate path below by zeroing the
    // version.
    if (!RefreshColumns())
      version_on_disk = 0;
  }

  // A final, alternative catch-all migration to simply re-sync everything.
  if (version_on_disk != kCurrentDBVersion) {
    if (version_on_disk > kCurrentDBVersion)
      return false;

    // Fallback (re-sync everything) migration path.
    DVLOG(1) << "Old/null sync database, version " << version_on_disk;
    // Delete the existing database (if any), and create a fresh one.
    DropAllTables();
    if (!CreateTables())
      return false;
  }

  // Sanity-read share_info; a database without it is unusable.
  sql::Statement s(db_->GetUniqueStatement(
          "SELECT db_create_version, db_create_time FROM share_info"));
  if (!s.Step())
    return false;
  string db_create_version = s.ColumnString(0);
  int db_create_time = s.ColumnInt(1);
  DVLOG(1) << "DB created at " << db_create_time << " by version " <<
      db_create_version;

  return transaction.Commit();
}
478
479 // This function drops unused columns by creating a new table that contains only
480 // the currently used columns then copying all rows from the old tables into
481 // this new one. The tables are then rearranged so the new replaces the old.
bool DirectoryBackingStore::RefreshColumns() {
  DCHECK(needs_column_refresh_);

  // Create a new table named temp_metas.
  SafeDropTable("temp_metas");
  if (!CreateMetasTable(true))
    return false;

  // Populate temp_metas from metas.
  //
  // At this point, the metas table may contain columns belonging to obsolete
  // schema versions.  This statement explicitly lists only the columns that
  // belong to the current schema version, so the obsolete columns will be
  // effectively dropped once we rename temp_metas over top of metas.
  std::string query = "INSERT INTO temp_metas (";
  AppendColumnList(&query);
  query.append(") SELECT ");
  AppendColumnList(&query);
  query.append(" FROM metas");
  if (!db_->Execute(query.c_str()))
    return false;

  // Drop metas.
  SafeDropTable("metas");

  // Rename temp_metas -> metas.
  if (!db_->Execute("ALTER TABLE temp_metas RENAME TO metas"))
    return false;

  // Repeat the process for share_info.
  SafeDropTable("temp_share_info");
  if (!CreateShareInfoTable(true))
    return false;

  // TODO(rlarocque, 124140): Remove notification_state.
  if (!db_->Execute(
          "INSERT INTO temp_share_info (id, name, store_birthday, "
          "db_create_version, db_create_time, next_id, cache_guid,"
          "notification_state, bag_of_chips) "
          "SELECT id, name, store_birthday, db_create_version, "
          "db_create_time, next_id, cache_guid, notification_state, "
          "bag_of_chips "
          "FROM share_info"))
    return false;

  SafeDropTable("share_info");
  if (!db_->Execute("ALTER TABLE temp_share_info RENAME TO share_info"))
    return false;

  // Both tables now match the current schema; no refresh pending.
  needs_column_refresh_ = false;
  return true;
}
534
LoadEntries(Directory::MetahandlesMap * handles_map)535 bool DirectoryBackingStore::LoadEntries(
536 Directory::MetahandlesMap* handles_map) {
537 string select;
538 select.reserve(kUpdateStatementBufferSize);
539 select.append("SELECT ");
540 AppendColumnList(&select);
541 select.append(" FROM metas");
542
543 sql::Statement s(db_->GetUniqueStatement(select.c_str()));
544
545 while (s.Step()) {
546 scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
547 // A null kernel is evidence of external data corruption.
548 if (!kernel)
549 return false;
550
551 int64 handle = kernel->ref(META_HANDLE);
552 (*handles_map)[handle] = kernel.release();
553 }
554 return s.Succeeded();
555 }
556
LoadDeleteJournals(JournalIndex * delete_journals)557 bool DirectoryBackingStore::LoadDeleteJournals(
558 JournalIndex* delete_journals) {
559 string select;
560 select.reserve(kUpdateStatementBufferSize);
561 select.append("SELECT ");
562 AppendColumnList(&select);
563 select.append(" FROM deleted_metas");
564
565 sql::Statement s(db_->GetUniqueStatement(select.c_str()));
566
567 while (s.Step()) {
568 scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
569 // A null kernel is evidence of external data corruption.
570 if (!kernel)
571 return false;
572 delete_journals->insert(kernel.release());
573 }
574 return s.Succeeded();
575 }
576
// Reads directory-wide state: the single share_info row, the per-type models
// rows, and the maximum metahandle in use.
bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
  {
    // share_info: birthday, next local ID, cache GUID and bag of chips.
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT store_birthday, next_id, cache_guid, bag_of_chips "
            "FROM share_info"));
    if (!s.Step())
      return false;

    info->kernel_info.store_birthday = s.ColumnString(0);
    info->kernel_info.next_id = s.ColumnInt64(1);
    info->cache_guid = s.ColumnString(2);
    s.ColumnBlobAsString(3, &(info->kernel_info.bag_of_chips));

    // Verify there was only one row returned.
    DCHECK(!s.Step());
    DCHECK(s.Succeeded());
  }

  {
    // models: per-datatype progress markers, transaction versions and
    // contexts, keyed by the protobuf-derived model ID.
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT model_id, progress_marker, "
            "transaction_version, context FROM models"));

    while (s.Step()) {
      ModelType type = ModelIdToModelTypeEnum(s.ColumnBlob(0),
                                              s.ColumnByteLength(0));
      // Unrecognized or non-protocol model IDs are skipped rather than
      // treated as errors.
      if (type != UNSPECIFIED && type != TOP_LEVEL_FOLDER) {
        info->kernel_info.download_progress[type].ParseFromArray(
            s.ColumnBlob(1), s.ColumnByteLength(1));
        info->kernel_info.transaction_version[type] = s.ColumnInt64(2);
        info->kernel_info.datatype_context[type].ParseFromArray(
            s.ColumnBlob(3), s.ColumnByteLength(3));
      }
    }
    if (!s.Succeeded())
      return false;
  }
  {
    // Highest metahandle currently in use by any entry.
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT MAX(metahandle) FROM metas"));
    if (!s.Step())
      return false;

    info->max_metahandle = s.ColumnInt64(0);

    // Verify only one row was returned.
    DCHECK(!s.Step());
    DCHECK(s.Succeeded());
  }
  return true;
}
631
632 /* static */
// Writes one entry via the prepared upsert statement.  Returns false if the
// statement fails to run.
bool DirectoryBackingStore::SaveEntryToDB(sql::Statement* save_statement,
                                          const EntryKernel& entry) {
  // Reset(true) clears any previous bindings so the cached statement can be
  // reused for this row.
  save_statement->Reset(true);
  BindFields(entry, save_statement);
  return save_statement->Run();
}
639
DropDeletedEntries()640 bool DirectoryBackingStore::DropDeletedEntries() {
641 if (!db_->Execute("DELETE FROM metas "
642 "WHERE is_del > 0 "
643 "AND is_unsynced < 1 "
644 "AND is_unapplied_update < 1")) {
645 return false;
646 }
647 if (!db_->Execute("DELETE FROM metas "
648 "WHERE is_del > 0 "
649 "AND id LIKE 'c%'")) {
650 return false;
651 }
652 return true;
653 }
654
SafeDropTable(const char * table_name)655 bool DirectoryBackingStore::SafeDropTable(const char* table_name) {
656 string query = "DROP TABLE IF EXISTS ";
657 query.append(table_name);
658 return db_->Execute(query.c_str());
659 }
660
DropAllTables()661 void DirectoryBackingStore::DropAllTables() {
662 SafeDropTable("metas");
663 SafeDropTable("temp_metas");
664 SafeDropTable("share_info");
665 SafeDropTable("temp_share_info");
666 SafeDropTable("share_version");
667 SafeDropTable("extended_attributes");
668 SafeDropTable("models");
669 SafeDropTable("temp_models");
670 needs_column_refresh_ = false;
671 }
672
673 // static
ModelType DirectoryBackingStore::ModelIdToModelTypeEnum(
    const void* data, int size) {
  // Model IDs are serialized EntitySpecifics protos (see
  // ModelTypeEnumToModelId); an unparseable blob maps to UNSPECIFIED.
  sync_pb::EntitySpecifics specifics;
  if (!specifics.ParseFromArray(data, size))
    return UNSPECIFIED;
  return GetModelTypeFromSpecifics(specifics);
}
681
682 // static
string DirectoryBackingStore::ModelTypeEnumToModelId(ModelType model_type) {
  // The persisted model ID is an EntitySpecifics proto with only the default
  // field for |model_type| populated, serialized to bytes — a
  // protobuf-derived ID rather than the raw enum value.
  sync_pb::EntitySpecifics specifics;
  AddDefaultFieldValue(model_type, &specifics);
  return specifics.SerializeAsString();
}
688
689 // static
std::string DirectoryBackingStore::GenerateCacheGUID() {
  // Generate a GUID with 128 bits of randomness.
  const int kGuidBytes = 128 / 8;
  std::string guid;
  // Base64-encode the random bytes to get a printable identifier.
  base::Base64Encode(base::RandBytesAsString(kGuidBytes), &guid);
  return guid;
}
697
// Generic helper for folding legacy per-field columns into a serialized
// EntitySpecifics blob column.  |old_columns| is a comma-separated list of
// the legacy columns; |handler_function| reads them (starting at column
// index 2 of the query) and fills in the corresponding proto fields.
bool DirectoryBackingStore::MigrateToSpecifics(
    const char* old_columns,
    const char* specifics_column,
    void (*handler_function)(sql::Statement* old_value_query,
                             int old_value_column,
                             sync_pb::EntitySpecifics* mutable_new_value)) {
  std::string query_sql = base::StringPrintf(
      "SELECT metahandle, %s, %s FROM metas", specifics_column, old_columns);
  std::string update_sql = base::StringPrintf(
      "UPDATE metas SET %s = ? WHERE metahandle = ?", specifics_column);

  sql::Statement query(db_->GetUniqueStatement(query_sql.c_str()));
  sql::Statement update(db_->GetUniqueStatement(update_sql.c_str()));

  while (query.Step()) {
    int64 metahandle = query.ColumnInt64(0);
    std::string new_value_bytes;
    // Start from whatever is already in the specifics column so earlier
    // migration passes are preserved.
    query.ColumnBlobAsString(1, &new_value_bytes);
    sync_pb::EntitySpecifics new_value;
    new_value.ParseFromString(new_value_bytes);
    handler_function(&query, 2, &new_value);
    new_value.SerializeToString(&new_value_bytes);

    update.BindBlob(0, new_value_bytes.data(), new_value_bytes.length());
    update.BindInt64(1, metahandle);
    if (!update.Run())
      return false;
    // Clear bindings before reusing the statement for the next row.
    update.Reset(true);
  }
  return query.Succeeded();
}
729
bool DirectoryBackingStore::SetVersion(int version) {
  // No WHERE clause: updates every row of share_version (expected to hold a
  // single row — see GetVersion()).
  sql::Statement s(db_->GetCachedStatement(
      SQL_FROM_HERE, "UPDATE share_version SET data = ?"));
  s.BindInt(0, version);

  return s.Run();
}
737
GetVersion()738 int DirectoryBackingStore::GetVersion() {
739 if (!db_->DoesTableExist("share_version"))
740 return 0;
741
742 sql::Statement statement(db_->GetUniqueStatement(
743 "SELECT data FROM share_version"));
744 if (statement.Step()) {
745 return statement.ColumnInt(0);
746 } else {
747 return 0;
748 }
749 }
750
bool DirectoryBackingStore::MigrateVersion67To68() {
  // This change simply removed three columns:
  //   string NAME
  //   string UNSANITIZED_NAME
  //   string SERVER_NAME
  // No data migration is necessary, but we should do a column refresh.
  SetVersion(68);
  // The obsolete columns are actually dropped later, by RefreshColumns().
  needs_column_refresh_ = true;
  return true;
}
761
MigrateVersion69To70()762 bool DirectoryBackingStore::MigrateVersion69To70() {
763 // Added "unique_client_tag", renamed "singleton_tag" to unique_server_tag
764 SetVersion(70);
765 if (!db_->Execute(
766 "ALTER TABLE metas ADD COLUMN unique_server_tag varchar"))
767 return false;
768 if (!db_->Execute(
769 "ALTER TABLE metas ADD COLUMN unique_client_tag varchar"))
770 return false;
771 needs_column_refresh_ = true;
772
773 if (!db_->Execute(
774 "UPDATE metas SET unique_server_tag = singleton_tag"))
775 return false;
776
777 return true;
778 }
779
780 namespace {
781
782 // Callback passed to MigrateToSpecifics for the v68->v69 migration. See
783 // MigrateVersion68To69().
void EncodeBookmarkURLAndFavicon(sql::Statement* old_value_query,
                                 int old_value_column,
                                 sync_pb::EntitySpecifics* mutable_new_value) {
  // Extract data from the column trio we expect.
  // (Actually four columns: is_bookmark_object, url, favicon, is_dir — in
  // the order MigrateVersion68To69() lists them.)
  bool old_is_bookmark_object = old_value_query->ColumnBool(old_value_column);
  std::string old_url = old_value_query->ColumnString(old_value_column + 1);
  std::string old_favicon;
  old_value_query->ColumnBlobAsString(old_value_column + 2, &old_favicon);
  bool old_is_dir = old_value_query->ColumnBool(old_value_column + 3);

  if (old_is_bookmark_object) {
    sync_pb::BookmarkSpecifics* bookmark_data =
        mutable_new_value->mutable_bookmark();
    // Only non-folder bookmarks carry a URL and favicon.
    if (!old_is_dir) {
      bookmark_data->set_url(old_url);
      bookmark_data->set_favicon(old_favicon);
    }
  }
}
803
804 } // namespace
805
bool DirectoryBackingStore::MigrateVersion68To69() {
  // In Version 68, there were columns on table 'metas':
  //   string BOOKMARK_URL
  //   string SERVER_BOOKMARK_URL
  //   blob BOOKMARK_FAVICON
  //   blob SERVER_BOOKMARK_FAVICON
  // In version 69, these columns went away in favor of storing
  // a serialized EntrySpecifics protobuf in the columns:
  //   protobuf blob SPECIFICS
  //   protobuf blob SERVER_SPECIFICS
  // For bookmarks, EntrySpecifics is extended as per
  // bookmark_specifics.proto. This migration converts bookmarks from the
  // former scheme to the latter scheme.

  // First, add the two new columns to the schema.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN specifics blob"))
    return false;
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN server_specifics blob"))
    return false;

  // Next, fold data from the old columns into the new protobuf columns,
  // once for the local fields and once for the server-side fields.
  if (!MigrateToSpecifics(("is_bookmark_object, bookmark_url, "
                           "bookmark_favicon, is_dir"),
                          "specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }
  if (!MigrateToSpecifics(("server_is_bookmark_object, "
                           "server_bookmark_url, "
                           "server_bookmark_favicon, "
                           "server_is_dir"),
                          "server_specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }

  // Lastly, fix up the "Google Chrome" folder, which is of the TOP_LEVEL_FOLDER
  // ModelType: it shouldn't have BookmarkSpecifics.
  if (!db_->Execute(
          "UPDATE metas SET specifics = NULL, server_specifics = NULL WHERE "
          "singleton_tag IN ('google_chrome')"))
    return false;

  SetVersion(69);
  needs_column_refresh_ = true;  // Trigger deletion of old columns.
  return true;
}
855
856 // Version 71, the columns 'initial_sync_ended' and 'last_sync_timestamp'
857 // were removed from the share_info table. They were replaced by
858 // the 'models' table, which has these values on a per-datatype basis.
bool DirectoryBackingStore::MigrateVersion70To71() {
  if (!CreateV71ModelsTable())
    return false;

  // Move data from the old share_info columns to the new models table.
  {
    sql::Statement fetch(db_->GetUniqueStatement(
        "SELECT last_sync_timestamp, initial_sync_ended FROM share_info"));
    if (!fetch.Step())
      return false;

    int64 last_sync_timestamp = fetch.ColumnInt64(0);
    bool initial_sync_ended = fetch.ColumnBool(1);

    // Verify there were no additional rows returned.
    DCHECK(!fetch.Step());
    DCHECK(fetch.Succeeded());

    sql::Statement update(db_->GetUniqueStatement(
        "INSERT INTO models (model_id, "
        "last_download_timestamp, initial_sync_ended) VALUES (?, ?, ?)"));
    // At this version only bookmarks were synced, so the single models row
    // created here is for BOOKMARKS.
    string bookmark_model_id = ModelTypeEnumToModelId(BOOKMARKS);
    update.BindBlob(0, bookmark_model_id.data(), bookmark_model_id.size());
    update.BindInt64(1, last_sync_timestamp);
    update.BindBool(2, initial_sync_ended);

    if (!update.Run())
      return false;
  }

  // Drop the columns from the old share_info table via a temp table:
  // copy the surviving columns into temp_share_info, then rename it over
  // the original (SQLite cannot drop columns directly at this vintage).
  const bool kCreateAsTempShareInfo = true;

  if (!CreateShareInfoTableVersion71(kCreateAsTempShareInfo))
    return false;
  if (!db_->Execute(
          "INSERT INTO temp_share_info (id, name, store_birthday, "
          "db_create_version, db_create_time, next_id, cache_guid) "
          "SELECT id, name, store_birthday, db_create_version, "
          "db_create_time, next_id, cache_guid FROM share_info"))
    return false;
  SafeDropTable("share_info");
  if (!db_->Execute(
          "ALTER TABLE temp_share_info RENAME TO share_info"))
    return false;
  SetVersion(71);
  return true;
}
907
bool DirectoryBackingStore::MigrateVersion71To72() {
  // Version 72 removed a table 'extended_attributes', whose
  // contents didn't matter.
  SafeDropTable("extended_attributes");
  SetVersion(72);
  return true;
}
915
bool DirectoryBackingStore::MigrateVersion72To73() {
  // Version 73 added one column to the table 'share_info': notification_state
  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN notification_state BLOB"))
    return false;
  SetVersion(73);
  return true;
}
924
MigrateVersion73To74()925 bool DirectoryBackingStore::MigrateVersion73To74() {
926 // Version 74 added the following columns to the table 'share_info':
927 // autofill_migration_state
928 // bookmarks_added_during_autofill_migration
929 // autofill_migration_time
930 // autofill_entries_added_during_migration
931 // autofill_profiles_added_during_migration
932
933 if (!db_->Execute(
934 "ALTER TABLE share_info ADD COLUMN "
935 "autofill_migration_state INT default 0"))
936 return false;
937
938 if (!db_->Execute(
939 "ALTER TABLE share_info ADD COLUMN "
940 "bookmarks_added_during_autofill_migration "
941 "INT default 0"))
942 return false;
943
944 if (!db_->Execute(
945 "ALTER TABLE share_info ADD COLUMN autofill_migration_time "
946 "INT default 0"))
947 return false;
948
949 if (!db_->Execute(
950 "ALTER TABLE share_info ADD COLUMN "
951 "autofill_entries_added_during_migration "
952 "INT default 0"))
953 return false;
954
955 if (!db_->Execute(
956 "ALTER TABLE share_info ADD COLUMN "
957 "autofill_profiles_added_during_migration "
958 "INT default 0"))
959 return false;
960
961 SetVersion(74);
962 return true;
963 }
964
bool DirectoryBackingStore::MigrateVersion74To75() {
  // In version 74, there was a table 'models':
  //     blob model_id (entity specifics, primary key)
  //     int last_download_timestamp
  //     boolean initial_sync_ended
  // In version 75, we deprecated the integer-valued last_download_timestamp,
  // using instead a protobuf-valued progress_marker field:
  //     blob progress_marker
  // The progress_marker values are initialized from the value of
  // last_download_timestamp, thereby preserving the download state.

  // Move aside the old table and create a new empty one at the current schema.
  if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
    return false;
  if (!CreateV75ModelsTable())
    return false;

  sql::Statement query(db_->GetUniqueStatement(
      "SELECT model_id, last_download_timestamp, initial_sync_ended "
      "FROM temp_models"));

  sql::Statement update(db_->GetUniqueStatement(
      "INSERT INTO models (model_id, "
      "progress_marker, initial_sync_ended) VALUES (?, ?, ?)"));

  // Copy each row across, converting last_download_timestamp into a
  // progress marker.  Rows whose model_id does not map to a known ModelType
  // are intentionally dropped.
  while (query.Step()) {
    ModelType type = ModelIdToModelTypeEnum(query.ColumnBlob(0),
                                            query.ColumnByteLength(0));
    if (type != UNSPECIFIED) {
      // Set the |timestamp_token_for_migration| on a new
      // DataTypeProgressMarker, using the old value of last_download_timestamp.
      // The server will turn this into a real token on our behalf the next
      // time we check for updates.
      sync_pb::DataTypeProgressMarker progress_marker;
      progress_marker.set_data_type_id(
          GetSpecificsFieldNumberFromModelType(type));
      progress_marker.set_timestamp_token_for_migration(query.ColumnInt64(1));
      std::string progress_blob;
      progress_marker.SerializeToString(&progress_blob);

      update.BindBlob(0, query.ColumnBlob(0), query.ColumnByteLength(0));
      update.BindBlob(1, progress_blob.data(), progress_blob.length());
      update.BindBool(2, query.ColumnBool(2));
      if (!update.Run())
        return false;
      // Reset so the statement (and its bindings) can be reused next row.
      update.Reset(true);
    }
  }
  if (!query.Succeeded())
    return false;

  // Drop the old table.
  SafeDropTable("temp_models");

  SetVersion(75);
  return true;
}
1022
MigrateVersion75To76()1023 bool DirectoryBackingStore::MigrateVersion75To76() {
1024 // This change removed five columns:
1025 // autofill_migration_state
1026 // bookmarks_added_during_autofill_migration
1027 // autofill_migration_time
1028 // autofill_entries_added_during_migration
1029 // autofill_profiles_added_during_migration
1030 // No data migration is necessary, but we should do a column refresh.
1031 SetVersion(76);
1032 needs_column_refresh_ = true;
1033 return true;
1034 }
1035
bool DirectoryBackingStore::MigrateVersion76To77() {
  // This change changes the format of stored timestamps to ms since
  // the Unix epoch.
#if defined(OS_WIN)
// On Windows, we used to store timestamps in FILETIME format (100s of
// ns since Jan 1, 1601). Magic numbers taken from
// http://stackoverflow.com/questions/5398557/
// java-library-for-dealing-with-win32-filetime
// .
#define TO_UNIX_TIME_MS(x) #x " = " #x " / 10000 - 11644473600000"
#else
// On other platforms, we used to store timestamps in time_t format (s
// since the Unix epoch).
#define TO_UNIX_TIME_MS(x) #x " = " #x " * 1000"
#endif
  // TO_UNIX_TIME_MS stringizes its argument into an in-place SQL assignment,
  // e.g. TO_UNIX_TIME_MS(mtime) becomes "mtime = mtime * 1000", so a single
  // UPDATE converts all four timestamp columns at once.
  sql::Statement update_timestamps(db_->GetUniqueStatement(
      "UPDATE metas SET "
      TO_UNIX_TIME_MS(mtime) ", "
      TO_UNIX_TIME_MS(server_mtime) ", "
      TO_UNIX_TIME_MS(ctime) ", "
      TO_UNIX_TIME_MS(server_ctime)));
#undef TO_UNIX_TIME_MS
  if (!update_timestamps.Run())
    return false;
  SetVersion(77);
  return true;
}
1063
MigrateVersion77To78()1064 bool DirectoryBackingStore::MigrateVersion77To78() {
1065 // Version 78 added one column to table 'metas': base_server_specifics.
1066 if (!db_->Execute(
1067 "ALTER TABLE metas ADD COLUMN base_server_specifics BLOB")) {
1068 return false;
1069 }
1070 SetVersion(78);
1071 return true;
1072 }
1073
MigrateVersion78To79()1074 bool DirectoryBackingStore::MigrateVersion78To79() {
1075 // Some users are stuck with a DB that causes them to reuse existing IDs. We
1076 // perform this one-time fixup on all users to help the few that are stuck.
1077 // See crbug.com/142987 for details.
1078 if (!db_->Execute(
1079 "UPDATE share_info SET next_id = next_id - 65536")) {
1080 return false;
1081 }
1082 SetVersion(79);
1083 return true;
1084 }
1085
MigrateVersion79To80()1086 bool DirectoryBackingStore::MigrateVersion79To80() {
1087 if (!db_->Execute(
1088 "ALTER TABLE share_info ADD COLUMN bag_of_chips BLOB"))
1089 return false;
1090 sql::Statement update(db_->GetUniqueStatement(
1091 "UPDATE share_info SET bag_of_chips = ?"));
1092 // An empty message is serialized to an empty string.
1093 update.BindBlob(0, NULL, 0);
1094 if (!update.Run())
1095 return false;
1096 SetVersion(80);
1097 return true;
1098 }
1099
MigrateVersion80To81()1100 bool DirectoryBackingStore::MigrateVersion80To81() {
1101 if(!db_->Execute(
1102 "ALTER TABLE metas ADD COLUMN server_ordinal_in_parent BLOB"))
1103 return false;
1104
1105 sql::Statement get_positions(db_->GetUniqueStatement(
1106 "SELECT metahandle, server_position_in_parent FROM metas"));
1107
1108 sql::Statement put_ordinals(db_->GetUniqueStatement(
1109 "UPDATE metas SET server_ordinal_in_parent = ?"
1110 "WHERE metahandle = ?"));
1111
1112 while(get_positions.Step()) {
1113 int64 metahandle = get_positions.ColumnInt64(0);
1114 int64 position = get_positions.ColumnInt64(1);
1115
1116 const std::string& ordinal = Int64ToNodeOrdinal(position).ToInternalValue();
1117 put_ordinals.BindBlob(0, ordinal.data(), ordinal.length());
1118 put_ordinals.BindInt64(1, metahandle);
1119
1120 if(!put_ordinals.Run())
1121 return false;
1122 put_ordinals.Reset(true);
1123 }
1124
1125 SetVersion(81);
1126 needs_column_refresh_ = true;
1127 return true;
1128 }
1129
MigrateVersion81To82()1130 bool DirectoryBackingStore::MigrateVersion81To82() {
1131 if (!db_->Execute(
1132 "ALTER TABLE models ADD COLUMN transaction_version BIGINT default 0"))
1133 return false;
1134 sql::Statement update(db_->GetUniqueStatement(
1135 "UPDATE models SET transaction_version = 0"));
1136 if (!update.Run())
1137 return false;
1138 SetVersion(82);
1139 return true;
1140 }
1141
MigrateVersion82To83()1142 bool DirectoryBackingStore::MigrateVersion82To83() {
1143 // Version 83 added transaction_version on sync node.
1144 if (!db_->Execute(
1145 "ALTER TABLE metas ADD COLUMN transaction_version BIGINT default 0"))
1146 return false;
1147 sql::Statement update(db_->GetUniqueStatement(
1148 "UPDATE metas SET transaction_version = 0"));
1149 if (!update.Run())
1150 return false;
1151 SetVersion(83);
1152 return true;
1153 }
1154
MigrateVersion83To84()1155 bool DirectoryBackingStore::MigrateVersion83To84() {
1156 // Version 84 added deleted_metas table to store deleted metas until we know
1157 // for sure that the deletions are persisted in native models.
1158 string query = "CREATE TABLE deleted_metas ";
1159 query.append(ComposeCreateTableColumnSpecs());
1160 if (!db_->Execute(query.c_str()))
1161 return false;
1162 SetVersion(84);
1163 return true;
1164 }
1165
MigrateVersion84To85()1166 bool DirectoryBackingStore::MigrateVersion84To85() {
1167 // Version 85 removes the initial_sync_ended flag.
1168 if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
1169 return false;
1170 if (!CreateV81ModelsTable())
1171 return false;
1172 if (!db_->Execute("INSERT INTO models SELECT "
1173 "model_id, progress_marker, transaction_version "
1174 "FROM temp_models")) {
1175 return false;
1176 }
1177 SafeDropTable("temp_models");
1178
1179 SetVersion(85);
1180 return true;
1181 }
1182
bool DirectoryBackingStore::MigrateVersion85To86() {
  // Version 86 removes both server ordinals and local NEXT_ID, PREV_ID and
  // SERVER_{POSITION,ORDINAL}_IN_PARENT and replaces them with UNIQUE_POSITION
  // and SERVER_UNIQUE_POSITION.
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "server_unique_position BLOB")) {
    return false;
  }
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "unique_position BLOB")) {
    return false;
  }
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "unique_bookmark_tag VARCHAR")) {
    return false;
  }

  // Fetch the cache_guid from the DB, because we don't otherwise have access to
  // it from here.
  sql::Statement get_cache_guid(db_->GetUniqueStatement(
      "SELECT cache_guid FROM share_info"));
  if (!get_cache_guid.Step()) {
    return false;
  }
  std::string cache_guid = get_cache_guid.ColumnString(0);
  // share_info is expected to hold exactly one row.
  DCHECK(!get_cache_guid.Step());
  DCHECK(get_cache_guid.Succeeded());

  sql::Statement get(db_->GetUniqueStatement(
      "SELECT "
      " metahandle, "
      " id, "
      " specifics, "
      " is_dir, "
      " unique_server_tag, "
      " server_ordinal_in_parent "
      "FROM metas"));

  // Note that we set both the local and server position based on the server
  // position. We will lose any unsynced local position changes. Unfortunately,
  // there's nothing we can do to avoid that. The NEXT_ID / PREV_ID values
  // can't be translated into a UNIQUE_POSITION in a reliable way.
  //
  // NOTE(review): the last two literals below concatenate to
  // "... unique_bookmark_tag = ?WHERE metahandle = ?" with no space before
  // WHERE.  SQLite tokenizes this correctly ('?' is a complete token), but
  // take care if editing this statement.
  sql::Statement put(db_->GetCachedStatement(
      SQL_FROM_HERE,
      "UPDATE metas SET"
      " server_unique_position = ?,"
      " unique_position = ?,"
      " unique_bookmark_tag = ?"
      "WHERE metahandle = ?"));

  while (get.Step()) {
    int64 metahandle = get.ColumnInt64(0);

    std::string id_string;
    get.ColumnBlobAsString(1, &id_string);

    sync_pb::EntitySpecifics specifics;
    specifics.ParseFromArray(
        get.ColumnBlob(2), get.ColumnByteLength(2));

    bool is_dir = get.ColumnBool(3);

    std::string server_unique_tag = get.ColumnString(4);

    std::string ordinal_string;
    get.ColumnBlobAsString(5, &ordinal_string);
    NodeOrdinal ordinal(ordinal_string);


    std::string unique_bookmark_tag;

    // We only maintain positions for bookmarks that are not server-defined
    // top-level folders.
    UniquePosition position;
    if (GetModelTypeFromSpecifics(specifics) == BOOKMARKS
        && !(is_dir && !server_unique_tag.empty())) {
      // IDs beginning with 'c' are client-created (uncommitted) IDs.
      if (id_string.at(0) == 'c') {
        // We found an uncommitted item.  This is rare, but fortunate.  This
        // means we can set the bookmark tag according to the originator client
        // item ID and originator cache guid, because (unlike the other case) we
        // know that this client is the originator.
        unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
            cache_guid,
            id_string.substr(1));
      } else {
        // If we've already committed the item, then we don't know who the
        // originator was.  We do not have access to the originator client item
        // ID and originator cache guid at this point.
        //
        // We will base our hash entirely on the server ID instead.  This is
        // incorrect, but at least all clients that undergo this migration step
        // will be incorrect in the same way.
        //
        // To get everyone back into a synced state, we will update the bookmark
        // tag according to the originator_cache_guid and originator_item_id
        // when we see updates for this item.  That should ensure that commonly
        // modified items will end up with the proper tag values eventually.
        unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
            std::string(), // cache_guid left intentionally blank.
            id_string.substr(1));
      }

      int64 int_position = NodeOrdinalToInt64(ordinal);
      position = UniquePosition::FromInt64(int_position, unique_bookmark_tag);
    } else {
      // Leave bookmark_tag and position at their default (invalid) values.
    }

    std::string position_blob;
    position.SerializeToString(&position_blob);
    put.BindBlob(0, position_blob.data(), position_blob.length());
    put.BindBlob(1, position_blob.data(), position_blob.length());
    put.BindBlob(2, unique_bookmark_tag.data(), unique_bookmark_tag.length());
    put.BindInt64(3, metahandle);

    if (!put.Run())
      return false;
    // Reset so the cached statement can be reused for the next row.
    put.Reset(true);
  }

  SetVersion(86);
  needs_column_refresh_ = true;
  return true;
}
1307
MigrateVersion86To87()1308 bool DirectoryBackingStore::MigrateVersion86To87() {
1309 // Version 87 adds AttachmentMetadata proto.
1310 if (!db_->Execute(
1311 "ALTER TABLE metas ADD COLUMN "
1312 "attachment_metadata BLOB")) {
1313 return false;
1314 }
1315 SetVersion(87);
1316 needs_column_refresh_ = true;
1317 return true;
1318 }
1319
MigrateVersion87To88()1320 bool DirectoryBackingStore::MigrateVersion87To88() {
1321 // Version 88 adds the datatype context to the models table.
1322 if (!db_->Execute("ALTER TABLE models ADD COLUMN context blob"))
1323 return false;
1324
1325 SetVersion(88);
1326 return true;
1327 }
1328
bool DirectoryBackingStore::CreateTables() {
  DVLOG(1) << "First run, creating tables";
  // Create two little tables share_version and share_info
  if (!db_->Execute(
          "CREATE TABLE share_version ("
          "id VARCHAR(128) primary key, data INT)")) {
    return false;
  }

  {
    // Record this share's directory name together with the current schema
    // version.
    sql::Statement s(db_->GetUniqueStatement(
        "INSERT INTO share_version VALUES(?, ?)"));
    s.BindString(0, dir_name_);
    s.BindInt(1, kCurrentDBVersion);

    if (!s.Run())
      return false;
  }

  const bool kCreateAsTempShareInfo = false;
  if (!CreateShareInfoTable(kCreateAsTempShareInfo)) {
    return false;
  }

  {
    // Populate the single share_info row.  Bind indices below must match the
    // column order in this VALUES list.
    sql::Statement s(db_->GetUniqueStatement(
        "INSERT INTO share_info VALUES"
        "(?, "  // id
        "?, "   // name
        "?, "   // store_birthday
        "?, "   // db_create_version
        "?, "   // db_create_time
        "-2, "  // next_id
        "?, "   // cache_guid
        // TODO(rlarocque, 124140): Remove notification_state field.
        "?, "   // notification_state
        "?);"));  // bag_of_chips
    s.BindString(0, dir_name_);                   // id
    s.BindString(1, dir_name_);                   // name
    s.BindString(2, std::string());               // store_birthday
    // TODO(akalin): Remove this unused db_create_version field. (Or
    // actually use it for something.) http://crbug.com/118356
    s.BindString(3, "Unknown");                   // db_create_version
    s.BindInt(4, static_cast<int32>(time(0)));    // db_create_time
    s.BindString(5, GenerateCacheGUID());         // cache_guid
    // TODO(rlarocque, 124140): Remove this unused notification-state field.
    s.BindBlob(6, NULL, 0);                       // notification_state
    s.BindBlob(7, NULL, 0);                       // bag_of_chips
    if (!s.Run())
      return false;
  }

  if (!CreateModelsTable())
    return false;

  // Create the big metas table.
  if (!CreateMetasTable(false))
    return false;

  {
    // Insert the entry for the root into the metas table.
    const int64 now = TimeToProtoTime(base::Time::Now());
    sql::Statement s(db_->GetUniqueStatement(
        "INSERT INTO metas "
        "( id, metahandle, is_dir, ctime, mtime ) "
        "VALUES ( \"r\", 1, 1, ?, ? )"));
    s.BindInt64(0, now);
    s.BindInt64(1, now);

    if (!s.Run())
      return false;
  }

  return true;
}
1404
CreateMetasTable(bool is_temporary)1405 bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
1406 string query = "CREATE TABLE ";
1407 query.append(is_temporary ? "temp_metas" : "metas");
1408 query.append(ComposeCreateTableColumnSpecs());
1409 if (!db_->Execute(query.c_str()))
1410 return false;
1411
1412 // Create a deleted_metas table to save copies of deleted metas until the
1413 // deletions are persisted. For simplicity, don't try to migrate existing
1414 // data because it's rarely used.
1415 SafeDropTable("deleted_metas");
1416 query = "CREATE TABLE deleted_metas ";
1417 query.append(ComposeCreateTableColumnSpecs());
1418 return db_->Execute(query.c_str());
1419 }
1420
CreateV71ModelsTable()1421 bool DirectoryBackingStore::CreateV71ModelsTable() {
1422 // This is an old schema for the Models table, used from versions 71 to 74.
1423 return db_->Execute(
1424 "CREATE TABLE models ("
1425 "model_id BLOB primary key, "
1426 "last_download_timestamp INT, "
1427 // Gets set if the syncer ever gets updates from the
1428 // server and the server returns 0. Lets us detect the
1429 // end of the initial sync.
1430 "initial_sync_ended BOOLEAN default 0)");
1431 }
1432
CreateV75ModelsTable()1433 bool DirectoryBackingStore::CreateV75ModelsTable() {
1434 // This is an old schema for the Models table, used from versions 75 to 80.
1435 return db_->Execute(
1436 "CREATE TABLE models ("
1437 "model_id BLOB primary key, "
1438 "progress_marker BLOB, "
1439 // Gets set if the syncer ever gets updates from the
1440 // server and the server returns 0. Lets us detect the
1441 // end of the initial sync.
1442 "initial_sync_ended BOOLEAN default 0)");
1443 }
1444
bool DirectoryBackingStore::CreateV81ModelsTable() {
  // This is an old schema for the Models table, used from versions 81 to 87.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "progress_marker BLOB, "
      // Tracks the model's latest transaction version.  (The comment that
      // previously sat here described initial_sync_ended — a copy-paste
      // leftover from the v75 schema, which this version replaced.)
      "transaction_version BIGINT default 0)");
}
1456
bool DirectoryBackingStore::CreateModelsTable() {
  // This is the current schema for the Models table, from version 88
  // onward.  If you change the schema, you'll probably want to double-check
  // the use of this function in the v84-v85 migration.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "progress_marker BLOB, "
      // Tracks the model's latest transaction version.  (The comment that
      // previously sat here described initial_sync_ended — a copy-paste
      // leftover from the v75 schema.)
      "transaction_version BIGINT default 0,"
      // Per-datatype context blob, added in version 88.
      "context BLOB)");
}
1471
CreateShareInfoTable(bool is_temporary)1472 bool DirectoryBackingStore::CreateShareInfoTable(bool is_temporary) {
1473 const char* name = is_temporary ? "temp_share_info" : "share_info";
1474 string query = "CREATE TABLE ";
1475 query.append(name);
1476 // This is the current schema for the ShareInfo table, from version 76
1477 // onward.
1478 query.append(" ("
1479 "id TEXT primary key, "
1480 "name TEXT, "
1481 "store_birthday TEXT, "
1482 "db_create_version TEXT, "
1483 "db_create_time INT, "
1484 "next_id INT default -2, "
1485 "cache_guid TEXT, "
1486 // TODO(rlarocque, 124140): Remove notification_state field.
1487 "notification_state BLOB, "
1488 "bag_of_chips BLOB"
1489 ")");
1490 return db_->Execute(query.c_str());
1491 }
1492
CreateShareInfoTableVersion71(bool is_temporary)1493 bool DirectoryBackingStore::CreateShareInfoTableVersion71(
1494 bool is_temporary) {
1495 const char* name = is_temporary ? "temp_share_info" : "share_info";
1496 string query = "CREATE TABLE ";
1497 query.append(name);
1498 // This is the schema for the ShareInfo table used from versions 71 to 72.
1499 query.append(" ("
1500 "id TEXT primary key, "
1501 "name TEXT, "
1502 "store_birthday TEXT, "
1503 "db_create_version TEXT, "
1504 "db_create_time INT, "
1505 "next_id INT default -2, "
1506 "cache_guid TEXT )");
1507 return db_->Execute(query.c_str());
1508 }
1509
1510 // This function checks to see if the given list of Metahandles has any nodes
1511 // whose PARENT_ID values refer to ID values that do not actually exist.
1512 // Returns true on success.
VerifyReferenceIntegrity(const Directory::MetahandlesMap * handles_map)1513 bool DirectoryBackingStore::VerifyReferenceIntegrity(
1514 const Directory::MetahandlesMap* handles_map) {
1515 TRACE_EVENT0("sync", "SyncDatabaseIntegrityCheck");
1516 using namespace syncable;
1517 typedef base::hash_set<std::string> IdsSet;
1518
1519 IdsSet ids_set;
1520 bool is_ok = true;
1521
1522 for (Directory::MetahandlesMap::const_iterator it = handles_map->begin();
1523 it != handles_map->end(); ++it) {
1524 EntryKernel* entry = it->second;
1525 bool is_duplicate_id = !(ids_set.insert(entry->ref(ID).value()).second);
1526 is_ok = is_ok && !is_duplicate_id;
1527 }
1528
1529 IdsSet::iterator end = ids_set.end();
1530 for (Directory::MetahandlesMap::const_iterator it = handles_map->begin();
1531 it != handles_map->end(); ++it) {
1532 EntryKernel* entry = it->second;
1533 bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end);
1534 if (!parent_exists) {
1535 return false;
1536 }
1537 }
1538 return is_ok;
1539 }
1540
void DirectoryBackingStore::PrepareSaveEntryStatement(
    EntryTable table, sql::Statement* save_statement) {
  // Lazily builds an "INSERT OR REPLACE" statement covering every column of
  // the requested entry table.  If |save_statement| was already prepared,
  // it is reused as-is.
  if (save_statement->is_valid())
    return;

  string query;
  query.reserve(kUpdateStatementBufferSize);
  switch (table) {
    case METAS_TABLE:
      query.append("INSERT OR REPLACE INTO metas ");
      break;
    case DELETE_JOURNAL_TABLE:
      query.append("INSERT OR REPLACE INTO deleted_metas ");
      break;
  }

  // Build "( col1, col2, ... )" in |query| and "( ?, ?, ... )" in |values|
  // in a single pass over the field list.
  string values;
  values.reserve(kUpdateStatementBufferSize);
  values.append(" VALUES ");
  const char* separator = "( ";
  int i = 0;
  for (i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
    query.append(separator);
    values.append(separator);
    separator = ", ";
    query.append(ColumnName(i));
    values.append("?");
  }
  query.append(" ) ");
  values.append(" )");
  query.append(values);
  // NOTE(review): StringPrintf is applied to |query| even though the built
  // query contains no format specifiers, so the "metas" argument appears to
  // be unused — presumably a leftover from an earlier templated query.
  // Confirm before relying on (or removing) it.
  save_statement->Assign(db_->GetUniqueStatement(
      base::StringPrintf(query.c_str(), "metas").c_str()));
}
1575
1576 } // namespace syncable
1577 } // namespace syncer
1578