1 /*
2 * Copyright (c) 2021 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "schema_object.h"
17 #include "schema_utils.h"
18 #include "db_errno.h"
19 #include "log_print.h"
20 #include "schema_constant.h"
21
22 namespace DistributedDB {
23 namespace {
24 const std::string JSON_EXTRACT_FUNC_NAME = "json_extract_by_path";
25 const std::string FLATBUFFER_EXTRACT_FUNC_NAME = "flatbuffer_extract_by_path";
26
27 // For Json-Schema, display its original content before parse. For FlatBuffer-Schema, only display its parsed content.
DisplaySchemaLineByLine(SchemaType inType,const std::string & inSchema)28 void DisplaySchemaLineByLine(SchemaType inType, const std::string &inSchema)
29 {
30 constexpr uint32_t lengthPerLine = 400; // 400 char per line
31 constexpr uint32_t usualMaxLine = 25; // For normal schema, 25 line for 10k length is quite enough
32 LOGD("[Schema][Display] IS %s, LENGTH=%zu.", SchemaUtils::SchemaTypeString(inType).c_str(), inSchema.size());
33 uint32_t totalLine = (inSchema.size() + lengthPerLine - 1) / lengthPerLine;
34 for (uint32_t line = 0; line < totalLine; line++) {
35 if (line >= usualMaxLine) {
36 LOGD("......(UNCOMPLETED SCHEMA)");
37 break;
38 }
39 std::string lineStr = inSchema.substr(line * lengthPerLine, lengthPerLine);
40 LOGD("%s", lineStr.c_str());
41 }
42 }
43 }
44
GetExtractFuncName(SchemaType inSchemaType)45 std::string SchemaObject::GetExtractFuncName(SchemaType inSchemaType)
46 {
47 if (inSchemaType == SchemaType::JSON) {
48 return JSON_EXTRACT_FUNC_NAME;
49 } else {
50 return FLATBUFFER_EXTRACT_FUNC_NAME;
51 }
52 }
53
GenerateExtractSQL(SchemaType inSchemaType,const FieldPath & inFieldpath,FieldType inFieldType,uint32_t skipSize,const std::string & accessStr)54 std::string SchemaObject::GenerateExtractSQL(SchemaType inSchemaType, const FieldPath &inFieldpath,
55 FieldType inFieldType, uint32_t skipSize, const std::string &accessStr)
56 {
57 static std::map<FieldType, std::string> fieldTypeMapSQLiteType {
58 {FieldType::LEAF_FIELD_BOOL, "INT"},
59 {FieldType::LEAF_FIELD_INTEGER, "INT"},
60 {FieldType::LEAF_FIELD_LONG, "INT"},
61 {FieldType::LEAF_FIELD_DOUBLE, "REAL"},
62 {FieldType::LEAF_FIELD_STRING, "TEXT"},
63 };
64 if (inFieldpath.empty()) {
65 LOGE("[Schema][GenExtract] Path empty.");
66 return "";
67 }
68 if (fieldTypeMapSQLiteType.count(inFieldType) == 0) {
69 LOGE("[Schema][GenExtract] FieldType not support.");
70 return "";
71 }
72 std::string resultSql = " CAST("; // Reserve blank at begin for convenience.
73 resultSql += GetExtractFuncName(inSchemaType);
74 resultSql += "(" + accessStr + "value, '";
75 resultSql += SchemaUtils::FieldPathString(inFieldpath);
76 resultSql += "', ";
77 resultSql += std::to_string(skipSize);
78 resultSql += ") AS ";
79 resultSql += fieldTypeMapSQLiteType[inFieldType];
80 resultSql += ") "; // Reserve blank at end for convenience.
81 return resultSql;
82 }
83
SchemaObject()84 SchemaObject::SchemaObject() : flatbufferSchema_(*this) {};
85
// Copy constructor. Must mirror the copy assignment operator: in particular the parsed flatbuffer
// representation has to be copied as well, otherwise a copy of a FLATBUFFER-type object would keep
// schemaType_/schemaString_ but lose the parsed schema held by flatbufferSchema_.
SchemaObject::SchemaObject(const SchemaObject &other)
    : flatbufferSchema_(*this)
{
    isValid_ = other.isValid_;
    schemaType_ = other.schemaType_;
    flatbufferSchema_.CopyFrom(other.flatbufferSchema_); // Keep consistent with operator=
    schemaString_ = other.schemaString_;
    schemaVersion_ = other.schemaVersion_;
    schemaMode_ = other.schemaMode_;
    schemaSkipSize_ = other.schemaSkipSize_;
    schemaIndexes_ = other.schemaIndexes_;
    schemaDefine_ = other.schemaDefine_;
}
98
// Copy assignment: self-assignment safe; duplicates every schema member, including the
// parsed flatbuffer representation.
SchemaObject& SchemaObject::operator=(const SchemaObject &other)
{
    if (this == &other) {
        return *this; // Self assignment: nothing to do
    }
    isValid_ = other.isValid_;
    schemaType_ = other.schemaType_;
    flatbufferSchema_.CopyFrom(other.flatbufferSchema_);
    schemaString_ = other.schemaString_;
    schemaVersion_ = other.schemaVersion_;
    schemaMode_ = other.schemaMode_;
    schemaSkipSize_ = other.schemaSkipSize_;
    schemaIndexes_ = other.schemaIndexes_;
    schemaDefine_ = other.schemaDefine_;
    return *this;
}
114
115 #ifdef RELATIONAL_STORE
// Build a SchemaObject directly from a relational TableInfo (RELATIONAL_STORE builds only).
// The resulting object is immediately valid, typed NONE, and stores the table's field
// definitions at nest depth 0.
SchemaObject::SchemaObject(const TableInfo &tableInfo) : flatbufferSchema_(*this)
{
    isValid_ = true;
    schemaType_ = SchemaType::NONE; // Default NONE
    schemaVersion_ = "1.0"; // NOTE(review): literal version string; presumably matches SCHEMA_SUPPORT_VERSION — confirm
    SchemaDefine schemaDefine = tableInfo.GetSchemaDefine();
    schemaDefine_.insert({ 0, schemaDefine }); // Depth 0: relational fields are all top-level
}
124 #endif // RELATIONAL_STORE
125
// Parse inSchemaString (either Json-Schema or base64/FlatBuffer-Schema) and load its content into
// this object. Returns -E_NOT_PERMIT if this object is already valid, -E_INVALID_ARGS if the
// (decoded) schema exceeds the size limit, otherwise the underlying parse result; isValid_ is set
// only when the whole parse succeeds.
int SchemaObject::ParseFromSchemaString(const std::string &inSchemaString)
{
    if (isValid_) {
        // Re-parsing over an already-valid schema is forbidden.
        return -E_NOT_PERMIT;
    }

    // Judge whether it is FlatBuffer-Schema then check the schema-size first
    SchemaType estimateType = SchemaType::JSON; // Estimate as JSON type firstly
    std::string decoded;
    if (FlatBufferSchema::IsFlatBufferSchema(inSchemaString, decoded)) {
        estimateType = SchemaType::FLATBUFFER;
        LOGD("[Schema][Parse] FlatBuffer-Type, Decode before=%zu, after=%zu.", inSchemaString.size(), decoded.size());
    }
    // For FlatBuffer type the size limit is applied to the decoded form, not the raw input.
    const std::string &oriSchema = ((estimateType == SchemaType::FLATBUFFER) ? decoded : inSchemaString);
    if (oriSchema.size() > SchemaConstant::SCHEMA_STRING_SIZE_LIMIT) {
        LOGE("[Schema][Parse] SchemaSize=%zu Too Large.", oriSchema.size());
        return -E_INVALID_ARGS;
    }

    // Parse the corresponding type schema
    if (estimateType == SchemaType::FLATBUFFER) {
        int errCode = flatbufferSchema_.ParseFlatBufferSchema(oriSchema);
        if (errCode != E_OK) {
            return errCode;
        }
        DisplaySchemaLineByLine(SchemaType::FLATBUFFER, flatbufferSchema_.GetDescription());
        schemaType_ = SchemaType::FLATBUFFER;
        schemaString_ = oriSchema; // Store the decoded flatbuffer schema, not the raw input
    } else {
        DisplaySchemaLineByLine(SchemaType::JSON, oriSchema);
        JsonObject schemaJson;
        int errCode = schemaJson.Parse(oriSchema);
        if (errCode != E_OK) {
            LOGE("[Schema][Parse] Json parse schema fail, errCode=%d, Not FlatBuffer Not Json.", errCode);
            return errCode;
        }
        errCode = ParseJsonSchema(schemaJson);
        if (errCode != E_OK) {
            return errCode;
        }
        schemaType_ = SchemaType::JSON;
        schemaString_ = schemaJson.ToString(); // Save the minify type of version string
    }

    isValid_ = true;
    return E_OK;
}
173
// Whether a schema has been successfully parsed into this object.
bool SchemaObject::IsSchemaValid() const
{
    return isValid_;
}
178
// Returns the parsed schema type (JSON, FLATBUFFER, or NONE before a successful parse).
SchemaType SchemaObject::GetSchemaType() const
{
    return schemaType_;
}
183
// Returns the stored schema string: the minified Json for JSON type, the decoded schema for
// FLATBUFFER type, or empty if nothing was parsed.
std::string SchemaObject::ToSchemaString() const
{
    return schemaString_;
}
188
// Returns the configured SCHEMA_SKIPSIZE: the number of value-prefix bytes to skip before the
// schema-described payload (0 when the optional field was absent).
uint32_t SchemaObject::GetSkipSize() const
{
    return schemaSkipSize_;
}
193
GetIndexInfo() const194 std::map<IndexName, IndexInfo> SchemaObject::GetIndexInfo() const
195 {
196 if (!isValid_) {
197 // An invalid SchemaObject may contain some dirty info produced by failed parse.
198 return std::map<IndexName, IndexInfo>();
199 }
200 return schemaIndexes_;
201 }
202
IsIndexExist(const IndexName & indexName) const203 bool SchemaObject::IsIndexExist(const IndexName &indexName) const
204 {
205 if (!isValid_) {
206 return false;
207 }
208 return (schemaIndexes_.count(indexName) != 0);
209 }
210
CheckQueryableAndGetFieldType(const FieldPath & inPath,FieldType & outType) const211 int SchemaObject::CheckQueryableAndGetFieldType(const FieldPath &inPath, FieldType &outType) const
212 {
213 if (inPath.empty()) {
214 return -E_INVALID_ARGS;
215 }
216 if (schemaDefine_.count(inPath.size() - 1) == 0) {
217 return -E_NOT_FOUND;
218 }
219 if (schemaDefine_.at(inPath.size() - 1).count(inPath) == 0) {
220 return -E_NOT_FOUND;
221 }
222 const SchemaAttribute &targetAttr = schemaDefine_.at(inPath.size() - 1).at(inPath);
223 outType = targetAttr.type;
224 return (targetAttr.isIndexable ? E_OK : -E_NOT_SUPPORT);
225 }
226
// Convenience overload: compare against a schema string when the caller does not need
// the resulting index difference.
int SchemaObject::CompareAgainstSchemaString(const std::string &inSchemaString) const
{
    IndexDifference indexDiffer;
    return CompareAgainstSchemaString(inSchemaString, indexDiffer);
}
232
// Parse inSchemaString into a temporary SchemaObject and compare this object (which must already
// be valid) against it; indexDiffer receives the index changes. A parse failure of the input
// string is returned as-is.
int SchemaObject::CompareAgainstSchemaString(const std::string &inSchemaString, IndexDifference &indexDiffer) const
{
    if (!isValid_) {
        return -E_NOT_PERMIT;
    }
    SchemaObject newSchema;
    int errCode = newSchema.ParseFromSchemaString(inSchemaString);
    if (errCode != E_OK) {
        return errCode;
    }
    return CompareAgainstSchemaObject(newSchema, indexDiffer);
}
245
// Convenience overload: compare against another SchemaObject when the caller does not need
// the resulting index difference.
int SchemaObject::CompareAgainstSchemaObject(const SchemaObject &inSchemaObject) const
{
    IndexDifference indexDiffer;
    return CompareAgainstSchemaObject(inSchemaObject, indexDiffer);
}
251
// Compare this schema against inSchemaObject (both must be valid and of the same type).
// The comparison proceeds version/mode -> skipsize -> define -> indexes; the first incompatible
// stage aborts with -E_SCHEMA_UNEQUAL_INCOMPATIBLE. Otherwise the define result dominates unless
// the defines are exactly equal, in which case the index result decides. indexDiffer is filled by
// the index comparison. Note: "equal/unequal" results are negative codes, never plain E_OK.
int SchemaObject::CompareAgainstSchemaObject(const SchemaObject &inSchemaObject, IndexDifference &indexDiffer) const
{
    if (!isValid_ || !inSchemaObject.isValid_) {
        return -E_NOT_PERMIT;
    }
    if (schemaType_ != inSchemaObject.schemaType_) {
        LOGE("[Schema][Compare] Self is %s, other is %s.", SchemaUtils::SchemaTypeString(schemaType_).c_str(),
            SchemaUtils::SchemaTypeString(inSchemaObject.schemaType_).c_str());
        return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
    }

    // Return E_SCHEMA_EQUAL_EXACTLY or E_SCHEMA_UNEQUAL_INCOMPATIBLE
    int verModeResult = CompareSchemaVersionMode(inSchemaObject);
    if (verModeResult == -E_SCHEMA_UNEQUAL_INCOMPATIBLE) {
        return verModeResult;
    }

    // Return E_SCHEMA_EQUAL_EXACTLY or E_SCHEMA_UNEQUAL_INCOMPATIBLE
    int skipSizeResult = CompareSchemaSkipSize(inSchemaObject);
    if (skipSizeResult == -E_SCHEMA_UNEQUAL_INCOMPATIBLE) {
        return skipSizeResult;
    }

    // Return E_SCHEMA_EQUAL_EXACTLY or E_SCHEMA_UNEQUAL_COMPATIBLE_UPGRADE or E_SCHEMA_UNEQUAL_INCOMPATIBLE
    int defineResult;
    if (schemaType_ == SchemaType::JSON) {
        defineResult = CompareSchemaDefine(inSchemaObject);
    } else {
        defineResult = flatbufferSchema_.CompareFlatBufferDefine(inSchemaObject.flatbufferSchema_);
    }
    if (defineResult == -E_SCHEMA_UNEQUAL_INCOMPATIBLE) {
        return defineResult;
    }

    // Return E_SCHEMA_EQUAL_EXACTLY or E_SCHEMA_UNEQUAL_COMPATIBLE
    int indexResult = CompareSchemaIndexes(inSchemaObject, indexDiffer);
    // A compatible-upgrade define result outranks a mere index difference.
    return ((defineResult == -E_SCHEMA_EQUAL_EXACTLY) ? indexResult : defineResult);
}
290
// Check inValue against this Json schema and amend it in place (fill fields found lacking by
// CheckValue) when needed. Returns -E_VALUE_MATCH when the value already matched,
// -E_VALUE_MATCH_AMENDED when it matched after amendment, or an error code.
int SchemaObject::CheckValueAndAmendIfNeed(ValueSource sourceType, ValueObject &inValue) const
{
    if (!isValid_ || schemaType_ != SchemaType::JSON) { // Currently this method only support Json-Schema
        return -E_NOT_PERMIT;
    }

    std::set<FieldPath> lackingPaths;
    int errCode = CheckValue(inValue, lackingPaths);
    if (errCode != -E_VALUE_MATCH) {
        // Value mismatch (or error): nothing to amend.
        return errCode;
    }

    bool amended = false;
    errCode = AmendValueIfNeed(inValue, lackingPaths, amended);
    if (errCode != E_OK) { // Unlikely
        LOGE("[Schema][CheckAmend] Amend fail, errCode=%d, srcType=%d.", errCode, static_cast<int>(sourceType));
        return -E_INTERNAL_ERROR;
    }
    return (amended ? -E_VALUE_MATCH_AMENDED : -E_VALUE_MATCH);
}
311
// Overload: verify a Value container by viewing its bytes as a RawValue.
int SchemaObject::VerifyValue(ValueSource sourceType, const Value &inValue) const
{
    return VerifyValue(sourceType, RawValue{inValue.data(), inValue.size()});
}
316
VerifyValue(ValueSource sourceType,const RawValue & inValue) const317 int SchemaObject::VerifyValue(ValueSource sourceType, const RawValue &inValue) const
318 {
319 if (inValue.first == nullptr) {
320 return -E_INVALID_ARGS;
321 }
322 if (!isValid_ || schemaType_ != SchemaType::FLATBUFFER) {
323 return -E_NOT_PERMIT;
324 }
325 if (inValue.second <= schemaSkipSize_) {
326 LOGE("[Schema][Verify] Value length=%zu invalid, skipsize=%u.", inValue.second, schemaSkipSize_);
327 return -E_FLATBUFFER_VERIFY_FAIL;
328 }
329
330 RawValue rawValue;
331 std::vector<uint8_t> cache;
332 if (schemaSkipSize_ % SchemaConstant::SECURE_BYTE_ALIGN == 0) {
333 rawValue = {inValue.first + schemaSkipSize_, inValue.second - schemaSkipSize_};
334 } else {
335 cache.assign(inValue.first + schemaSkipSize_, inValue.first + inValue.second);
336 rawValue = {cache.data(), cache.size()};
337 }
338
339 // Currently do not try no sizePrefix, future may depend on sourceType
340 int errCode = flatbufferSchema_.VerifyFlatBufferValue(rawValue, false);
341 if (errCode != E_OK) {
342 LOGE("[Schema][Verify] Value verify fail, srcType=%d.", static_cast<int>(sourceType));
343 return errCode;
344 }
345 return E_OK;
346 }
347
ExtractValue(ValueSource sourceType,RawString inPath,const RawValue & inValue,TypeValue & outExtract,std::vector<uint8_t> * cache) const348 int SchemaObject::ExtractValue(ValueSource sourceType, RawString inPath, const RawValue &inValue,
349 TypeValue &outExtract, std::vector<uint8_t> *cache) const
350 {
351 // NOTE!!! This function is performance sensitive !!! Carefully not to allocate memory often!!!
352 if (!isValid_ || schemaType_ != SchemaType::FLATBUFFER) {
353 return -E_NOT_PERMIT;
354 }
355 if (inPath == nullptr || inValue.first == nullptr) {
356 return -E_INVALID_ARGS;
357 }
358 if (inValue.second <= schemaSkipSize_) {
359 LOGE("[Schema][Extract] Value length=%u invalid, skipsize=%u.", inValue.second, schemaSkipSize_);
360 return -E_FLATBUFFER_VERIFY_FAIL;
361 }
362
363 RawValue rawValue;
364 std::vector<uint8_t> *tempCache = nullptr; // A temporary cache for use when input cache can not hold.
365 if (schemaSkipSize_ % SchemaConstant::SECURE_BYTE_ALIGN == 0) {
366 rawValue = {inValue.first + schemaSkipSize_, inValue.second - schemaSkipSize_};
367 } else if ((cache != nullptr) && (cache->size() >= (inValue.second - schemaSkipSize_))) {
368 // Do not expand the cache if it can not hold
369 cache->assign(inValue.first + schemaSkipSize_, inValue.first + inValue.second);
370 rawValue = {cache->data(), inValue.second - schemaSkipSize_}; // Attention: Do not use cache.size() as second.
371 } else {
372 // Use a temporary cache, which will release its memory quickly
373 tempCache = new (std::nothrow) std::vector<uint8_t>;
374 if (tempCache == nullptr) {
375 LOGE("[Schema][Extract] OOM.");
376 return -E_OUT_OF_MEMORY;
377 }
378 tempCache->resize(inValue.second - schemaSkipSize_);
379 tempCache->assign(inValue.first + schemaSkipSize_, inValue.first + inValue.second);
380 rawValue = {tempCache->data(), tempCache->size()};
381 }
382
383 // Currently do not try no sizePrefix, future may depend on sourceType
384 int errCode = flatbufferSchema_.ExtractFlatBufferValue(inPath, rawValue, outExtract, false);
385 if (errCode != E_OK) {
386 LOGE("[Schema][Extract] Fail, path=%s, srcType=%d.", inPath, static_cast<int>(sourceType));
387 }
388 delete tempCache; // delete nullptr is safe
389 tempCache = nullptr;
390 return errCode;
391 }
392
// Parse a Json schema object: mandatory meta fields first (count/type check, then
// SCHEMA_VERSION/SCHEMA_MODE, then SCHEMA_DEFINE), followed by the optional SCHEMA_INDEXES and
// SCHEMA_SKIPSIZE. Any failing step aborts the whole parse with its error code.
int SchemaObject::ParseJsonSchema(const JsonObject &inJsonObject)
{
    // Parse and check mandatory metaField below
    int errCode = CheckMetaFieldCountAndType(inJsonObject);
    if (errCode != E_OK) {
        return errCode;
    }
    errCode = ParseCheckSchemaVersionMode(inJsonObject);
    if (errCode != E_OK) {
        return errCode;
    }
    errCode = ParseCheckSchemaDefine(inJsonObject);
    if (errCode != E_OK) {
        return errCode;
    }
    // Parse and check optional metaField below
    errCode = ParseCheckSchemaIndexes(inJsonObject);
    if (errCode != E_OK) {
        return errCode;
    }
    errCode = ParseCheckSchemaSkipSize(inJsonObject);
    if (errCode != E_OK) {
        return errCode;
    }
    return E_OK;
}
419
420 namespace {
CheckOptionalMetaFieldCountAndType(const std::map<FieldPath,FieldType> & metaFieldPathType)421 int CheckOptionalMetaFieldCountAndType(const std::map<FieldPath, FieldType> &metaFieldPathType)
422 {
423 uint32_t indexMetaFieldCount = 0;
424 uint32_t skipSizeMetaFieldCount = 0;
425 if (metaFieldPathType.count(FieldPath{SchemaConstant::KEYWORD_SCHEMA_INDEXES}) != 0) {
426 indexMetaFieldCount++;
427 FieldType type = metaFieldPathType.at(FieldPath{SchemaConstant::KEYWORD_SCHEMA_INDEXES});
428 if (type != FieldType::LEAF_FIELD_ARRAY) {
429 LOGE("[Schema][CheckMeta] Expect SCHEMA_INDEXES type ARRAY but %s.",
430 SchemaUtils::FieldTypeString(type).c_str());
431 return -E_SCHEMA_PARSE_FAIL;
432 }
433 }
434 if (metaFieldPathType.count(FieldPath{SchemaConstant::KEYWORD_SCHEMA_SKIPSIZE}) != 0) {
435 skipSizeMetaFieldCount++;
436 FieldType type = metaFieldPathType.at(FieldPath{SchemaConstant::KEYWORD_SCHEMA_SKIPSIZE});
437 if (type != FieldType::LEAF_FIELD_INTEGER) {
438 LOGE("[Schema][CheckMeta] Expect SCHEMA_SKIPSIZE type INTEGER but %s.",
439 SchemaUtils::FieldTypeString(type).c_str());
440 return -E_SCHEMA_PARSE_FAIL;
441 }
442 }
443 if (metaFieldPathType.size() != (SchemaConstant::SCHEMA_META_FEILD_COUNT_MIN + indexMetaFieldCount +
444 skipSizeMetaFieldCount)) {
445 LOGE("[Schema][CheckMeta] Unrecognized metaField exist: total=%u, indexField=%u, skipSizeField=%u.",
446 metaFieldPathType.size(), indexMetaFieldCount, skipSizeMetaFieldCount);
447 return -E_SCHEMA_PARSE_FAIL;
448 }
449 return E_OK;
450 }
451 }
452
// Validate the top-level meta fields of a Json schema: total count within bounds, and the three
// mandatory keywords (SCHEMA_VERSION: string, SCHEMA_MODE: string, SCHEMA_DEFINE: non-empty
// object) present with the expected types. Optional keywords are delegated to
// CheckOptionalMetaFieldCountAndType.
int SchemaObject::CheckMetaFieldCountAndType(const JsonObject& inJsonObject) const
{
    std::map<FieldPath, FieldType> metaFieldPathType;
    int errCode = inJsonObject.GetSubFieldPathAndType(FieldPath(), metaFieldPathType);
    if (errCode != E_OK) {
        LOGE("[Schema][CheckMeta] GetSubFieldPathAndType fail, errCode=%d.", errCode);
        return errCode;
    }
    if (metaFieldPathType.size() < SchemaConstant::SCHEMA_META_FEILD_COUNT_MIN ||
        metaFieldPathType.size() > SchemaConstant::SCHEMA_META_FEILD_COUNT_MAX) {
        LOGE("[Schema][CheckMeta] Unexpected metafield count=%zu.", metaFieldPathType.size());
        return -E_SCHEMA_PARSE_FAIL;
    }
    // Check KeyWord SCHEMA_VERSION
    if (metaFieldPathType.count(FieldPath{SchemaConstant::KEYWORD_SCHEMA_VERSION}) == 0) {
        LOGE("[Schema][CheckMeta] Expect metafield SCHEMA_VERSION but not find.");
        return -E_SCHEMA_PARSE_FAIL;
    }
    FieldType type = metaFieldPathType.at(FieldPath{SchemaConstant::KEYWORD_SCHEMA_VERSION});
    if (type != FieldType::LEAF_FIELD_STRING) {
        LOGE("[Schema][CheckMeta] Expect SCHEMA_VERSION type STRING but %s.",
            SchemaUtils::FieldTypeString(type).c_str());
        return -E_SCHEMA_PARSE_FAIL;
    }
    // Check KeyWord SCHEMA_MODE
    if (metaFieldPathType.count(FieldPath{SchemaConstant::KEYWORD_SCHEMA_MODE}) == 0) {
        LOGE("[Schema][CheckMeta] Expect metafield SCHEMA_MODE but not find.");
        return -E_SCHEMA_PARSE_FAIL;
    }
    type = metaFieldPathType.at(FieldPath{SchemaConstant::KEYWORD_SCHEMA_MODE});
    if (type != FieldType::LEAF_FIELD_STRING) {
        LOGE("[Schema][CheckMeta] Expect SCHEMA_MODE type STRING but %s.", SchemaUtils::FieldTypeString(type).c_str());
        return -E_SCHEMA_PARSE_FAIL;
    }
    // Check KeyWord SCHEMA_DEFINE
    if (metaFieldPathType.count(FieldPath{SchemaConstant::KEYWORD_SCHEMA_DEFINE}) == 0) {
        LOGE("[Schema][CheckMeta] Expect metafield SCHEMA_DEFINE but not find.");
        return -E_SCHEMA_PARSE_FAIL;
    }
    type = metaFieldPathType.at(FieldPath{SchemaConstant::KEYWORD_SCHEMA_DEFINE});
    if (type != FieldType::INTERNAL_FIELD_OBJECT) { // LEAF_FIELD_OBJECT indicate an empty object which is not allowed
        LOGE("[Schema][CheckMeta] Expect SCHEMA_DEFINE type INTERNAL_OBJECT but %s.",
            SchemaUtils::FieldTypeString(type).c_str());
        return -E_SCHEMA_PARSE_FAIL;
    }
    // Check KeyWord SCHEMA_INDEXES If Need
    return CheckOptionalMetaFieldCountAndType(metaFieldPathType);
}
501
ParseCheckSchemaVersionMode(const JsonObject & inJsonObject)502 int SchemaObject::ParseCheckSchemaVersionMode(const JsonObject& inJsonObject)
503 {
504 // Note: it has been checked in CheckMetaFieldCountAndType that SCHEMA_VERSION field exists and its type is string.
505 FieldValue versionValue;
506 int errCode = inJsonObject.GetFieldValueByFieldPath(FieldPath{SchemaConstant::KEYWORD_SCHEMA_VERSION},
507 versionValue);
508 if (errCode != E_OK) {
509 return -E_INTERNAL_ERROR;
510 }
511 if (SchemaUtils::Strip(versionValue.stringValue) != SchemaConstant::SCHEMA_SUPPORT_VERSION) {
512 LOGE("[Schema][ParseVerMode] Unexpected SCHEMA_VERSION=%s.", versionValue.stringValue.c_str());
513 return -E_SCHEMA_PARSE_FAIL;
514 }
515 schemaVersion_ = SchemaConstant::SCHEMA_SUPPORT_VERSION;
516
517 // Note: it has been checked in CheckMetaFieldCountAndType that SCHEMA_MODE field exists and its type is string.
518 FieldValue modeValue;
519 errCode = inJsonObject.GetFieldValueByFieldPath(FieldPath{SchemaConstant::KEYWORD_SCHEMA_MODE}, modeValue);
520 if (errCode != E_OK) {
521 return -E_INTERNAL_ERROR;
522 }
523 std::string modeStripped = SchemaUtils::Strip(modeValue.stringValue);
524 if (modeStripped != SchemaConstant::KEYWORD_MODE_STRICT &&
525 modeStripped != SchemaConstant::KEYWORD_MODE_COMPATIBLE) {
526 LOGE("[Schema][ParseVerMode] Unexpected SCHEMA_MODE=%s.", modeValue.stringValue.c_str());
527 return -E_SCHEMA_PARSE_FAIL;
528 }
529 schemaMode_ = ((modeStripped == SchemaConstant::KEYWORD_MODE_STRICT) ? SchemaMode::STRICT : SchemaMode::COMPATIBLE);
530 return E_OK;
531 }
532
// Parse the SCHEMA_DEFINE tree breadth-first, depth by depth, filling schemaDefine_[depth] with a
// SchemaAttribute per field path (SCHEMA_DEFINE itself stripped from the front of each path).
// Enforces the maximum nesting depth and total field-name count.
int SchemaObject::ParseCheckSchemaDefine(const JsonObject& inJsonObject)
{
    // Clear schemaDefine_ to recover from a fail parse
    schemaDefine_.clear();
    // Note: it has been checked in CheckMetaFieldCountAndType that SCHEMA_DEFINE field exists and its type is
    // internal-object. Nest path refer to those field with type internal object that has sub field.
    std::set<FieldPath> nestPathCurDepth{FieldPath{SchemaConstant::KEYWORD_SCHEMA_DEFINE}};
    uint32_t fieldNameCount = 0;
    for (uint32_t depth = 0; depth < SchemaConstant::SCHEMA_FEILD_PATH_DEPTH_MAX; depth++) {
        // Collect every direct child (path and type) of the nest paths gathered at the previous depth.
        std::map<FieldPath, FieldType> subPathType;
        int errCode = inJsonObject.GetSubFieldPathAndType(nestPathCurDepth, subPathType);
        if (errCode != E_OK) { // Unlikely
            LOGE("[Schema][ParseDefine] Internal Error: GetSubFieldPathAndType Fail, Depth=%u.", depth);
            return -E_INTERNAL_ERROR;
        }
        fieldNameCount += subPathType.size();
        nestPathCurDepth.clear(); // Clear it for collecting new nestPath
        for (const auto &subField : subPathType) {
            SchemaAttribute attribute;
            errCode = CheckSchemaDefineItemDecideAttribute(inJsonObject, subField.first, subField.second, attribute);
            if (errCode != E_OK) {
                LOGE("[Schema][ParseDefine] CheckSchemaDefineItemDecideAttribute Fail, Path=%s.",
                    SchemaUtils::FieldPathString(subField.first).c_str());
                return -E_SCHEMA_PARSE_FAIL;
            }
            // If everything ok, insert this schema item into schema define
            // Remember to remove SCHEMA_DEFINE in the front of the fieldpath
            schemaDefine_[depth][FieldPath(++(subField.first.begin()), subField.first.end())] = attribute;
            // Deal with the nestpath and check depth limitation
            if (subField.second == FieldType::INTERNAL_FIELD_OBJECT) {
                if (depth == SchemaConstant::SCHEMA_FEILD_PATH_DEPTH_MAX - 1) { // Minus 1 to be the boundary
                    LOGE("[Schema][ParseDefine] Path=%s is INTERNAL_FIELD_OBJECT but reach schema depth limitation.",
                        SchemaUtils::FieldPathString(subField.first).c_str());
                    return -E_SCHEMA_PARSE_FAIL;
                }
                nestPathCurDepth.insert(subField.first);
            }
        }
        // If no deeper schema define, quit loop in advance
        if (nestPathCurDepth.empty()) {
            break;
        }
    }
    if (fieldNameCount > SchemaConstant::SCHEMA_FEILD_NAME_COUNT_MAX) {
        // Check Field Count Here
        LOGE("[Schema][ParseDefine] FieldName count=%u exceed the limitation.", fieldNameCount)
;
        return -E_SCHEMA_PARSE_FAIL;
    }
    return E_OK;
}
583
// Validate one SCHEMA_DEFINE entry and derive its SchemaAttribute:
// - STRING leaves carry an attribute spec (type/notnull/default) which is parsed, and become indexable;
// - ARRAY leaves must be empty arrays; empty OBJECT leaves and INTERNAL objects get a plain,
//   non-indexable attribute of their own type. Any other type fails the parse.
int SchemaObject::CheckSchemaDefineItemDecideAttribute(const JsonObject& inJsonObject, const FieldPath &inPath,
    FieldType inType, SchemaAttribute &outAttr) const
{
    // Note: inPath will never be an empty vector, internal logic guarantee it, see the caller logic
    if (inPath.empty()) { // Not Possible. Just For Clear CodeDEX.
        return -E_INTERNAL_ERROR;
    }
    int errCode = SchemaUtils::CheckFieldName(inPath.back());
    if (errCode != E_OK) {
        LOGE("[Schema][CheckItemDecideAttr] Invalid fieldName=%s, errCode=%d.", inPath.back().c_str(), errCode);
        return -E_SCHEMA_PARSE_FAIL;
    }
    if (inType == FieldType::LEAF_FIELD_STRING) {
        // The string is the field's attribute specification, not a plain value.
        FieldValue subFieldValue;
        errCode = inJsonObject.GetFieldValueByFieldPath(inPath, subFieldValue);
        if (errCode != E_OK) { // Unlikely
            LOGE("[Schema][CheckItemDecideAttr] Internal Error: GetFieldValueByFieldPath Fail.");
            return -E_INTERNAL_ERROR;
        }
        errCode = SchemaUtils::ParseAndCheckSchemaAttribute(subFieldValue.stringValue, outAttr);
        if (errCode != E_OK) {
            LOGE("[Schema][CheckItemDecideAttr] ParseAndCheckSchemaAttribute Fail, errCode=%d.", errCode);
            return -E_SCHEMA_PARSE_FAIL;
        }
        // The ParseAndCheckSchemaAttribute do not cope with isIndexable field. Need to set it true here
        outAttr.isIndexable = true;
    } else if (inType == FieldType::LEAF_FIELD_ARRAY) {
        uint32_t arraySize = 0;
        errCode = inJsonObject.GetArraySize(inPath, arraySize);
        if (errCode != E_OK) {
            LOGE("[Schema][CheckItemDecideAttr] Internal Error: GetArraySize Fail.");
            return -E_INTERNAL_ERROR;
        }
        if (arraySize != 0) {
            LOGE("[Schema][CheckItemDecideAttr] Expect array empty but size=%u.", arraySize);
            return -E_SCHEMA_PARSE_FAIL;
        }
        outAttr = SchemaAttribute{inType, false, false, false, FieldValue()};
    } else if (inType == FieldType::LEAF_FIELD_OBJECT) {
        outAttr = SchemaAttribute{inType, false, false, false, FieldValue()};
    } else if (inType == FieldType::INTERNAL_FIELD_OBJECT) {
        outAttr = SchemaAttribute{inType, false, false, false, FieldValue()}; // hasNotNull set false is OK for this
    } else {
        LOGE("[Schema][CheckItemDecideAttr] Unexpected FieldType=%s.", SchemaUtils::FieldTypeString(inType).c_str());
        return -E_SCHEMA_PARSE_FAIL;
    }
    return E_OK;
}
632
// Parse the optional SCHEMA_INDEXES array: each entry is either a single field-path string or a
// string array (composite index). Absence of the field is legal; entry count is limited, and each
// entry is validated and registered by ParseCheckEachIndexFromStringArray.
int SchemaObject::ParseCheckSchemaIndexes(const JsonObject& inJsonObject)
{
    // Clear schemaIndexes_ to recover from a fail parse
    schemaIndexes_.clear();
    // No SCHEMA_INDEXES field is allowed
    if (!inJsonObject.IsFieldPathExist(FieldPath{SchemaConstant::KEYWORD_SCHEMA_INDEXES})) {
        LOGD("[Schema][ParseIndex] No SCHEMA_INDEXES Field.");
        return E_OK;
    }
    // The type of SCHEMA_INDEXES field has been checked in CheckMetaFieldCountAndType to be an array
    // If not all members of the array are string type or string-array, this call will return error
    std::vector<std::vector<std::string>> oriIndexArray;
    int errCode = inJsonObject.GetArrayContentOfStringOrStringArray(FieldPath{SchemaConstant::KEYWORD_SCHEMA_INDEXES},
        oriIndexArray);
    if (errCode != E_OK) {
        LOGE("[Schema][ParseIndex] GetArrayContent Fail, errCode=%d.", errCode);
        return -E_SCHEMA_PARSE_FAIL;
    }
    if (oriIndexArray.size() > SchemaConstant::SCHEMA_INDEX_COUNT_MAX) {
        LOGE("[Schema][ParseIndex] Index(Ori) count=%zu exceed limitation.", oriIndexArray.size());
        return -E_SCHEMA_PARSE_FAIL;
    }
    for (const auto &entry : oriIndexArray) {
        errCode = ParseCheckEachIndexFromStringArray(entry);
        if (errCode != E_OK) {
            return errCode;
        }
    }
    return E_OK;
}
663
ParseCheckSchemaSkipSize(const JsonObject & inJsonObject)664 int SchemaObject::ParseCheckSchemaSkipSize(const JsonObject& inJsonObject)
665 {
666 // No SCHEMA_SKIPSIZE field is allowed
667 if (!inJsonObject.IsFieldPathExist(FieldPath{SchemaConstant::KEYWORD_SCHEMA_SKIPSIZE})) {
668 LOGD("[Schema][ParseSkipSize] No SCHEMA_SKIPSIZE Field.");
669 return E_OK;
670 }
671 // The type of SCHEMA_SKIPSIZE field has been checked in CheckMetaFieldCountAndType to be an INTEGER
672 FieldValue skipSizeValue;
673 int errCode = inJsonObject.GetFieldValueByFieldPath(FieldPath {SchemaConstant::KEYWORD_SCHEMA_SKIPSIZE},
674 skipSizeValue);
675 if (errCode != E_OK) {
676 return -E_INTERNAL_ERROR;
677 }
678 if (skipSizeValue.integerValue < 0 ||
679 static_cast<uint32_t>(skipSizeValue.integerValue) > SchemaConstant::SCHEMA_SKIPSIZE_MAX) {
680 LOGE("[Schema][ParseSkipSize] Unexpected SCHEMA_SKIPSIZE=%d.", skipSizeValue.integerValue);
681 return -E_SCHEMA_PARSE_FAIL;
682 }
683 schemaSkipSize_ = static_cast<uint32_t>(skipSizeValue.integerValue);
684 return E_OK;
685 }
686
ParseCheckEachIndexFromStringArray(const std::vector<std::string> & inStrArray)687 int SchemaObject::ParseCheckEachIndexFromStringArray(const std::vector<std::string> &inStrArray)
688 {
689 std::vector<FieldPath> indexPathVec;
690 std::set<FieldPath> indexPathSet;
691 // Parse each indexFieldPathString and check duplication
692 for (const auto &eachPathStr : inStrArray) {
693 FieldPath eachPath;
694 int errCode = SchemaUtils::ParseAndCheckFieldPath(eachPathStr, eachPath);
695 if (errCode != E_OK) {
696 LOGE("[Schema][ParseEachIndex] IndexPath=%s Invalid.", eachPathStr.c_str());
697 return -E_SCHEMA_PARSE_FAIL;
698 }
699 if (eachPath.size() == 0 || eachPath.size() > SchemaConstant::SCHEMA_FEILD_PATH_DEPTH_MAX) {
700 LOGE("[Schema][ParseEachIndex] Root not indexable or path=%s depth exceed limit.", eachPathStr.c_str());
701 return -E_SCHEMA_PARSE_FAIL;
702 }
703 if (indexPathSet.count(eachPath) != 0) {
704 LOGE("[Schema][ParseEachIndex] IndexPath=%s Duplicated.", eachPathStr.c_str());
705 return -E_SCHEMA_PARSE_FAIL;
706 }
707 indexPathVec.push_back(eachPath);
708 indexPathSet.insert(eachPath);
709 }
710 if (indexPathVec.empty()) { // Unlikely, empty JsonArray had been eliminated by GetArrayContent Method
711 return -E_INTERNAL_ERROR;
712 }
713 // Check indexDefine duplication, Use Sort-Column(the first fieldPath in index) as the indexName.
714 const IndexName &indexName = indexPathVec.front();
715 if (schemaIndexes_.count(indexName) != 0) {
716 LOGE("[Schema][ParseEachIndex] IndexName=%s Already Defined.", SchemaUtils::FieldPathString(indexName).c_str());
717 return -E_SCHEMA_PARSE_FAIL;
718 }
719 // Create new indexInfo entry, then check indexable for each indexFieldPath against schemaDefine
720 return CheckFieldPathIndexableThenSave(indexPathVec, schemaIndexes_[indexName]);
721 }
722
CheckFieldPathIndexableThenSave(const std::vector<FieldPath> & inPathVec,IndexInfo & infoToSave)723 int SchemaObject::CheckFieldPathIndexableThenSave(const std::vector<FieldPath> &inPathVec, IndexInfo &infoToSave)
724 {
725 for (const auto &eachPath : inPathVec) {
726 // Previous logic guarantee eachPath.size greater than zero
727 uint32_t depth = eachPath.size() - 1; // minus 1 to change depth count from zero
728 std::string eachPathStr = SchemaUtils::FieldPathString(eachPath);
729 if (schemaDefine_.count(depth) == 0) {
730 LOGE("[Schema][CheckIndexable] No schema define of this depth, path=%s.", eachPathStr.c_str());
731 return -E_SCHEMA_PARSE_FAIL;
732 }
733 if (schemaDefine_[depth].count(eachPath) == 0) {
734 LOGE("[Schema][CheckIndexable] No such path in schema define, path=%s.", eachPathStr.c_str());
735 return -E_SCHEMA_PARSE_FAIL;
736 }
737 if (!schemaDefine_[depth][eachPath].isIndexable) {
738 LOGE("[Schema][CheckIndexable] Path=%s is not indexable.", eachPathStr.c_str());
739 return -E_SCHEMA_PARSE_FAIL;
740 }
741 // Save this indexField to indexInfo
742 infoToSave.push_back({eachPath, schemaDefine_[depth][eachPath].type});
743 }
744 return E_OK;
745 }
746
CompareSchemaVersionMode(const SchemaObject & newSchema) const747 int SchemaObject::CompareSchemaVersionMode(const SchemaObject &newSchema) const
748 {
749 static std::map<SchemaMode, std::string> modeMapString = {
750 {SchemaMode::STRICT, "STRICT"},
751 {SchemaMode::COMPATIBLE, "COMPATIBLE"},
752 };
753 if (schemaVersion_ != newSchema.schemaVersion_) {
754 LOGE("[Schema][CompareVerMode] OldVer=%s mismatch newVer=%s.", schemaVersion_.c_str(),
755 newSchema.schemaVersion_.c_str());
756 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
757 }
758 // Only Json-Schema need to compare mode
759 if (schemaType_ == SchemaType::JSON && schemaMode_ != newSchema.schemaMode_) {
760 LOGE("[Schema][CompareVerMode] OldMode=%s mismatch newMode=%s.", modeMapString[schemaMode_].c_str(),
761 modeMapString[newSchema.schemaMode_].c_str());
762 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
763 }
764 // Do not return E_OK here, E_OK is ambiguous.
765 return -E_SCHEMA_EQUAL_EXACTLY;
766 }
767
CompareSchemaSkipSize(const SchemaObject & newSchema) const768 int SchemaObject::CompareSchemaSkipSize(const SchemaObject &newSchema) const
769 {
770 if (schemaSkipSize_ != newSchema.schemaSkipSize_) {
771 LOGE("[Schema][CompareSkipSize] OldSkip=%u mismatch newSkip=%u.", schemaSkipSize_, newSchema.schemaSkipSize_);
772 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
773 }
774 // Do not return E_OK here, E_OK is ambiguous.
775 return -E_SCHEMA_EQUAL_EXACTLY;
776 }
777
CompareSchemaDefine(const SchemaObject & newSchema) const778 int SchemaObject::CompareSchemaDefine(const SchemaObject &newSchema) const
779 {
780 bool isEqualExactly = true;
781 for (uint32_t depth = 0; depth < SchemaConstant::SCHEMA_FEILD_PATH_DEPTH_MAX; depth++) {
782 SchemaDefine emptyDefine;
783 const SchemaDefine &defineInOldSchema =
784 (schemaDefine_.count(depth) == 0 ? emptyDefine : schemaDefine_.at(depth));
785 const SchemaDefine &defineInNewSchema =
786 (newSchema.schemaDefine_.count(depth) == 0 ? emptyDefine : newSchema.schemaDefine_.at(depth));
787
788 // No define at this depth for both schema
789 if (defineInNewSchema.empty() && defineInOldSchema.empty()) {
790 break;
791 }
792 // No matter strict or compatible mode, newSchema can't have less field than oldSchema
793 if (defineInNewSchema.size() < defineInOldSchema.size()) {
794 LOGE("[Schema][CompareDefine] newSize=%u less than oldSize=%u at depth=%u.", defineInNewSchema.size(),
795 defineInOldSchema.size(), depth);
796 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
797 }
798 if (defineInNewSchema.size() > defineInOldSchema.size()) {
799 // Strict mode not support increase fieldDefine
800 if (schemaMode_ == SchemaMode::STRICT) {
801 LOGE("[Schema][CompareDefine] newSize=%u more than oldSize=%u at depth=%u in STRICT mode.",
802 defineInNewSchema.size(), defineInOldSchema.size(), depth);
803 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
804 }
805 isEqualExactly = false;
806 }
807
808 // Compare schema define of this depth, looking for incompatible
809 int errCode = CompareSchemaDefineByDepth(defineInOldSchema, defineInNewSchema);
810 if (errCode == -E_SCHEMA_UNEQUAL_INCOMPATIBLE) {
811 return errCode;
812 }
813 }
814 // Do not return E_OK here, E_OK is ambiguous.
815 return (isEqualExactly ? -E_SCHEMA_EQUAL_EXACTLY : -E_SCHEMA_UNEQUAL_COMPATIBLE_UPGRADE);
816 }
817
818 namespace {
IsExtraFieldConformToCompatibility(const SchemaAttribute & inAttr)819 inline bool IsExtraFieldConformToCompatibility(const SchemaAttribute &inAttr)
820 {
821 return (!inAttr.hasNotNullConstraint || inAttr.hasDefaultValue);
822 }
823 }
824
CompareSchemaDefineByDepth(const SchemaDefine & oldDefine,const SchemaDefine & newDefine) const825 int SchemaObject::CompareSchemaDefineByDepth(const SchemaDefine &oldDefine, const SchemaDefine &newDefine) const
826 {
827 // Looking for incompatible : new define should at least contain all field the old define hold
828 for (auto &entry : oldDefine) {
829 if (newDefine.count(entry.first) == 0) {
830 LOGE("[Schema][CompareDefineDepth] fieldpath=%s not found in new schema.",
831 SchemaUtils::FieldPathString(entry.first).c_str());
832 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
833 }
834 // SchemaAttribute require to be equal exactly
835 int errCode = CompareSchemaAttribute(entry.second, newDefine.at(entry.first));
836 if (errCode != -E_SCHEMA_EQUAL_EXACTLY) {
837 LOGE("[Schema][CompareDefineDepth] Attribute mismatch at fieldpath=%s.",
838 SchemaUtils::FieldPathString(entry.first).c_str());
839 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
840 }
841 }
842 // Looking for incompatible : the extra field in new schema should has default or can be null
843 for (auto &entry : newDefine) {
844 if (oldDefine.count(entry.first) != 0) {
845 continue;
846 }
847 if (!IsExtraFieldConformToCompatibility(entry.second)) {
848 LOGE("[Schema][CompareDefineDepth] ExtraField=%s, {notnull=%d, default=%d}, not conform compatibility.",
849 SchemaUtils::FieldPathString(entry.first).c_str(), entry.second.hasNotNullConstraint,
850 entry.second.hasDefaultValue);
851 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
852 }
853 }
854 return -E_SCHEMA_EQUAL_EXACTLY;
855 }
856
CompareSchemaAttribute(const SchemaAttribute & oldAttr,const SchemaAttribute & newAttr) const857 int SchemaObject::CompareSchemaAttribute(const SchemaAttribute &oldAttr, const SchemaAttribute &newAttr) const
858 {
859 if (oldAttr.type != newAttr.type) {
860 // The exceptional case is that the field type changed from the leaf_object to internal_object,
861 // which indicate that sub fields are added to it. Changed from internal_object to leaf_object will
862 // sooner or later cause an incompatible detection in next depth, we discern this situation here in advance.
863 if (!(oldAttr.type == FieldType::LEAF_FIELD_OBJECT && newAttr.type == FieldType::INTERNAL_FIELD_OBJECT)) {
864 LOGE("[Schema][CompareAttr] OldType=%s mismatch newType=%s.",
865 SchemaUtils::FieldTypeString(oldAttr.type).c_str(), SchemaUtils::FieldTypeString(newAttr.type).c_str());
866 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
867 }
868 }
869 // Here we use isIndexable info to distinguish two categories of type.
870 // "BOOL, INTEGER, LONG, DOUBLE, STRING" are all indexable type, NULL type will not appear in Schema
871 // "ARRAY, LEAF_OBJECT, INTERNAL_OBJECT" are all not indexable type.
872 // They have been checked same type just above. No need to check more for not indexable type
873 if (oldAttr.isIndexable) {
874 if (oldAttr.hasNotNullConstraint != newAttr.hasNotNullConstraint) {
875 LOGE("[Schema][CompareAttr] OldNotNull=%d mismatch newNotNull=%d.", oldAttr.hasNotNullConstraint,
876 newAttr.hasNotNullConstraint);
877 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
878 }
879 if (oldAttr.hasDefaultValue != newAttr.hasDefaultValue) {
880 LOGE("[Schema][CompareAttr] OldHasDefault=%d mismatch newHasDefault=%d.", oldAttr.hasDefaultValue,
881 newAttr.hasDefaultValue);
882 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
883 }
884 if (oldAttr.hasDefaultValue) {
885 // DefaultValue require to be equal exactly
886 int errCode = CompareSchemaDefaultValue(oldAttr, newAttr);
887 if (errCode != -E_SCHEMA_EQUAL_EXACTLY) {
888 return errCode;
889 }
890 }
891 }
892 return -E_SCHEMA_EQUAL_EXACTLY;
893 }
894
namespace {
// Compare the binary (bit) representation of two doubles, so that e.g. 0.0 and -0.0 differ.
// NOTE: the previous reinterpret_cast of double* to uint64_t* violated the strict-aliasing
// rule (undefined behavior); accessing the object representation through unsigned char* is
// the standard-sanctioned way to inspect raw bytes, with no extra headers required.
inline bool IsDoubleBinaryEqual(double left, double right)
{
    const auto *leftBytes = reinterpret_cast<const unsigned char *>(&left);
    const auto *rightBytes = reinterpret_cast<const unsigned char *>(&right);
    for (unsigned i = 0; i < sizeof(double); i++) {
        if (leftBytes[i] != rightBytes[i]) {
            return false;
        }
    }
    return true;
}
}
901
CompareSchemaDefaultValue(const SchemaAttribute & oldAttr,const SchemaAttribute & newAttr) const902 int SchemaObject::CompareSchemaDefaultValue(const SchemaAttribute &oldAttr, const SchemaAttribute &newAttr) const
903 {
904 // Value type has been check equal for both attribute in the caller
905 if (oldAttr.type == FieldType::LEAF_FIELD_BOOL) {
906 if (oldAttr.defaultValue.boolValue != newAttr.defaultValue.boolValue) {
907 LOGE("[Schema][CompareDefault] OldDefault=%d mismatch newDefault=%d.", oldAttr.defaultValue.boolValue,
908 newAttr.defaultValue.boolValue);
909 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
910 }
911 } else if (oldAttr.type == FieldType::LEAF_FIELD_INTEGER) {
912 if (oldAttr.defaultValue.integerValue != newAttr.defaultValue.integerValue) {
913 LOGE("[Schema][CompareDefault] OldDefault=%d mismatch newDefault=%d.", oldAttr.defaultValue.integerValue,
914 newAttr.defaultValue.integerValue);
915 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
916 }
917 } else if (oldAttr.type == FieldType::LEAF_FIELD_LONG) {
918 if (oldAttr.defaultValue.longValue != newAttr.defaultValue.longValue) {
919 LOGE("[Schema][CompareDefault] OldDefault=%lld mismatch newDefault=%lld.", oldAttr.defaultValue.longValue,
920 newAttr.defaultValue.longValue);
921 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
922 }
923 } else if (oldAttr.type == FieldType::LEAF_FIELD_DOUBLE) {
924 // ATTENTION: Here we should compare two double by their binary layout. We should not judge them equal when
925 // difference is small enough, since two different default double value may diff so little. The binary
926 // layout of the double value will be the same if the original string is the same, so we directly compare them.
927 if (!IsDoubleBinaryEqual(oldAttr.defaultValue.doubleValue, newAttr.defaultValue.doubleValue)) {
928 LOGE("[Schema][CompareDefault] OldDefault=%f mismatch newDefault=%f.", oldAttr.defaultValue.doubleValue,
929 newAttr.defaultValue.doubleValue);
930 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
931 }
932 } else if (oldAttr.type == FieldType::LEAF_FIELD_STRING) {
933 if (oldAttr.defaultValue.stringValue != newAttr.defaultValue.stringValue) {
934 LOGE("[Schema][CompareDefault] OldDefault=%s mismatch newDefault=%s.",
935 oldAttr.defaultValue.stringValue.c_str(), newAttr.defaultValue.stringValue.c_str());
936 return -E_SCHEMA_UNEQUAL_INCOMPATIBLE;
937 }
938 }
939 // The caller logic guarantee that both attribute type will not be null, array, object
940 return -E_SCHEMA_EQUAL_EXACTLY;
941 }
942
943 namespace {
ClearIndexDifference(IndexDifference & indexDiffer)944 inline void ClearIndexDifference(IndexDifference &indexDiffer)
945 {
946 indexDiffer.change.clear();
947 indexDiffer.increase.clear();
948 indexDiffer.decrease.clear();
949 }
950
IsIndexInfoExactlyEqual(const IndexInfo & leftInfo,const IndexInfo & rightInfo)951 inline bool IsIndexInfoExactlyEqual(const IndexInfo &leftInfo, const IndexInfo &rightInfo)
952 {
953 // Exactly equal require count, order and type of each indexField in the index be the same
954 return leftInfo == rightInfo;
955 }
956
IsSchemaIndexesExactlyEqual(const IndexDifference & indexDiffer)957 inline bool IsSchemaIndexesExactlyEqual(const IndexDifference &indexDiffer)
958 {
959 return (indexDiffer.change.empty() && indexDiffer.increase.empty() && indexDiffer.decrease.empty());
960 }
961 }
962
CompareSchemaIndexes(const SchemaObject & newSchema,IndexDifference & indexDiffer) const963 int SchemaObject::CompareSchemaIndexes(const SchemaObject &newSchema, IndexDifference &indexDiffer) const
964 {
965 ClearIndexDifference(indexDiffer);
966 // Find the increase and change index
967 for (const auto &entry : newSchema.schemaIndexes_) {
968 if (schemaIndexes_.count(entry.first) == 0) {
969 LOGD("[Schema][CompareIndex] Increase indexName=%s.", SchemaUtils::FieldPathString(entry.first).c_str());
970 indexDiffer.increase[entry.first] = entry.second;
971 } else {
972 // Both schema have same IndexName, Check whether indexInfo differs
973 if (!IsIndexInfoExactlyEqual(entry.second, schemaIndexes_.at(entry.first))) {
974 LOGD("[Schema][CompareIndex] Change indexName=%s.", SchemaUtils::FieldPathString(entry.first).c_str());
975 indexDiffer.change[entry.first] = entry.second;
976 }
977 }
978 }
979 // Find the decrease index
980 for (const auto &entry : schemaIndexes_) {
981 if (newSchema.schemaIndexes_.count(entry.first) == 0) {
982 LOGD("[Schema][CompareIndex] Decrease indexName=%s.", SchemaUtils::FieldPathString(entry.first).c_str());
983 indexDiffer.decrease.insert(entry.first);
984 }
985 }
986 // Do not return E_OK here, E_OK is ambiguous.
987 return IsSchemaIndexesExactlyEqual(indexDiffer) ? -E_SCHEMA_EQUAL_EXACTLY : -E_SCHEMA_UNEQUAL_COMPATIBLE;
988 }
989
990 namespace {
CheckValueItemNumericType(FieldType typeInValue,FieldType typeInSchema)991 int CheckValueItemNumericType(FieldType typeInValue, FieldType typeInSchema)
992 {
993 if (typeInValue == FieldType::LEAF_FIELD_DOUBLE) {
994 if (typeInSchema != FieldType::LEAF_FIELD_DOUBLE) {
995 return -E_VALUE_MISMATCH_FEILD_TYPE;
996 }
997 } else if (typeInValue == FieldType::LEAF_FIELD_LONG) {
998 if (typeInSchema != FieldType::LEAF_FIELD_LONG &&
999 typeInSchema != FieldType::LEAF_FIELD_DOUBLE) {
1000 return -E_VALUE_MISMATCH_FEILD_TYPE;
1001 }
1002 } else {
1003 // LEAF_FIELD_INTEGER
1004 if (typeInSchema != FieldType::LEAF_FIELD_INTEGER &&
1005 typeInSchema != FieldType::LEAF_FIELD_LONG &&
1006 typeInSchema != FieldType::LEAF_FIELD_DOUBLE) {
1007 return -E_VALUE_MISMATCH_FEILD_TYPE;
1008 }
1009 }
1010 return -E_VALUE_MATCH;
1011 }
1012
IsTypeMustBeExactlyEqualBetweenSchemaAndValue(FieldType inType)1013 inline bool IsTypeMustBeExactlyEqualBetweenSchemaAndValue(FieldType inType)
1014 {
1015 return (inType == FieldType::LEAF_FIELD_BOOL ||
1016 inType == FieldType::LEAF_FIELD_STRING ||
1017 inType == FieldType::LEAF_FIELD_ARRAY);
1018 }
1019
IsObjectType(FieldType inType)1020 inline bool IsObjectType(FieldType inType)
1021 {
1022 return (inType == FieldType::LEAF_FIELD_OBJECT || inType == FieldType::INTERNAL_FIELD_OBJECT);
1023 }
1024
1025 // Check in the value-view for convenience
CheckValueItem(const SchemaAttribute & refAttr,FieldType typeInValue)1026 int CheckValueItem(const SchemaAttribute &refAttr, FieldType typeInValue)
1027 {
1028 FieldType typeInSchema = refAttr.type;
1029 if (typeInSchema == FieldType::LEAF_FIELD_NULL) { // Unlikely
1030 return -E_INTERNAL_ERROR;
1031 }
1032 // Check NotNull-Constraint first
1033 if (typeInValue == FieldType::LEAF_FIELD_NULL) {
1034 if (refAttr.hasNotNullConstraint) {
1035 return -E_VALUE_MISMATCH_CONSTRAINT;
1036 }
1037 return -E_VALUE_MATCH;
1038 }
1039 // If typeInValue not NULL, check against schema. First check type that must be equal.
1040 if (IsTypeMustBeExactlyEqualBetweenSchemaAndValue(typeInValue)) {
1041 if (typeInValue != typeInSchema) {
1042 return -E_VALUE_MISMATCH_FEILD_TYPE;
1043 }
1044 return -E_VALUE_MATCH;
1045 }
1046 // Check Object related type, lack or more field will be deal with at next depth
1047 // typeInSchema/typeInValue LEAF_OBJECT INTERNAL_OBJECT
1048 // LEAF_OBJECT MATCH MATCH(More field at next depth)
1049 // INTERNAL_OBJECT MATCH(Lack field at next depth) MATCH
1050 // ELSE(POSSIBLE) TYPE_MISMATCH TYPE_MISMATCH
1051 if (IsObjectType(typeInValue)) {
1052 if (!IsObjectType(typeInSchema)) {
1053 return -E_VALUE_MISMATCH_FEILD_TYPE;
1054 }
1055 return -E_VALUE_MATCH;
1056 }
1057 // Check Numeric related type, at last
1058 return CheckValueItemNumericType(typeInValue, typeInSchema);
1059 }
1060
IsLackingFieldViolateNotNullConstraint(const SchemaAttribute & refAttr)1061 inline bool IsLackingFieldViolateNotNullConstraint(const SchemaAttribute &refAttr)
1062 {
1063 return (refAttr.hasNotNullConstraint && !refAttr.hasDefaultValue);
1064 }
1065
1066 // Function only for split big function
CheckValueBySchemaItem(const std::pair<FieldPath,SchemaAttribute> & schemaItem,const std::map<FieldPath,FieldType> & subPathType,std::set<FieldPath> & lackingPaths)1067 int CheckValueBySchemaItem(const std::pair<FieldPath, SchemaAttribute> &schemaItem,
1068 const std::map<FieldPath, FieldType> &subPathType, std::set<FieldPath> &lackingPaths)
1069 {
1070 if (subPathType.count(schemaItem.first) == 0) { // Value do not contain this field
1071 if (IsLackingFieldViolateNotNullConstraint(schemaItem.second)) {
1072 return -E_VALUE_MISMATCH_CONSTRAINT;
1073 }
1074 lackingPaths.insert(schemaItem.first);
1075 return -E_VALUE_MATCH;
1076 }
1077 // Value contain this field, check its type
1078 return CheckValueItem(schemaItem.second, subPathType.at(schemaItem.first));
1079 }
1080
ValueFieldType(const std::map<FieldPath,FieldType> & subPathType,const FieldPath & inPath)1081 inline std::string ValueFieldType(const std::map<FieldPath, FieldType> &subPathType, const FieldPath &inPath)
1082 {
1083 if (subPathType.count(inPath) == 0) {
1084 return "NotExist";
1085 }
1086 return SchemaUtils::FieldTypeString(subPathType.at(inPath));
1087 }
1088 }
1089
// Check inValue against the schema define depth by depth.
// Fields the value lacks (where lacking is permitted) are collected into lackingPaths, so that
// AmendValueIfNeed can later fill in defaults. Returns -E_VALUE_MATCH when the value conforms,
// otherwise a -E_VALUE_MISMATCH_* errCode describing the first mismatch found.
int SchemaObject::CheckValue(const ValueObject &inValue, std::set<FieldPath> &lackingPaths) const
{
    std::set<FieldPath> nestPathCurDepth{FieldPath()}; // Empty path represent root path
    for (uint32_t depth = 0; depth < SchemaConstant::SCHEMA_FEILD_PATH_DEPTH_MAX; depth++) {
        if (schemaDefine_.count(depth) == 0 || schemaDefine_.at(depth).empty()) { // No schema define in this depth
            break;
        }

        // Collect the value's fields directly under the paths gathered at the previous depth
        std::map<FieldPath, FieldType> subPathType;
        int errCode = inValue.GetSubFieldPathAndType(nestPathCurDepth, subPathType); // Value field of current depth
        if (errCode != E_OK && errCode != -E_INVALID_PATH) { // E_INVALID_PATH for path not exist
            LOGE("[Schema][CheckValue] GetSubFieldPathAndType Fail=%d, Depth=%u.", errCode, depth);
            return -E_VALUE_MISMATCH_FEILD_TYPE;
        }
        nestPathCurDepth.clear(); // Clear it for collecting new nestPath

        // In strict mode the value must not contain more fields than the schema defines at this depth
        if ((schemaMode_ == SchemaMode::STRICT) && (subPathType.size() > schemaDefine_.at(depth).size())) {
            LOGE("[Schema][CheckValue] ValueFieldCount=%zu more than SchemaFieldCount=%zu at depth=%u",
                subPathType.size(), schemaDefine_.at(depth).size(), depth);
            return -E_VALUE_MISMATCH_FEILD_COUNT; // Value contain more field than schema
        }

        for (const auto &schemaItem : schemaDefine_.at(depth)) { // Check each field define in schema
            if (schemaItem.second.type == FieldType::INTERNAL_FIELD_OBJECT) {
                nestPathCurDepth.insert(schemaItem.first); // This field has subfield in schema
            }
            errCode = CheckValueBySchemaItem(schemaItem, subPathType, lackingPaths);
            if (errCode != -E_VALUE_MATCH) {
                LOGE("[Schema][CheckValue] Path=%s, schema{NotNull=%d,Default=%d,Type=%s}, Value{Type=%s}, errCode=%d.",
                    SchemaUtils::FieldPathString(schemaItem.first).c_str(), schemaItem.second.hasNotNullConstraint,
                    schemaItem.second.hasDefaultValue, SchemaUtils::FieldTypeString(schemaItem.second.type).c_str(),
                    ValueFieldType(subPathType, schemaItem.first).c_str(), errCode);
                return errCode;
            }
        }
    }
    return -E_VALUE_MATCH;
}
1128
AmendValueIfNeed(ValueObject & inValue,const std::set<FieldPath> & lackingPaths,bool & amended) const1129 int SchemaObject::AmendValueIfNeed(ValueObject &inValue, const std::set<FieldPath> &lackingPaths, bool &amended) const
1130 {
1131 for (const auto &eachLackingPath : lackingPaths) {
1132 // Note: The upper code logic guarantee that eachLackingPath won't be empty and must exist in schemaDefine_
1133 uint32_t depth = eachLackingPath.size() - 1; // Depth count from zero
1134 const SchemaAttribute &lackingPathAttr = schemaDefine_.at(depth).at(eachLackingPath);
1135 // If no default value, just ignore this lackingPath
1136 if (!lackingPathAttr.hasDefaultValue) {
1137 continue;
1138 }
1139 // If has default value, the ParseSchema logic guarantee that fieldType won't be NULL, ARRAY or OBJECT
1140 // The lacking intermediate field will be automatically insert for this lackingPath
1141 int errCode = inValue.InsertField(eachLackingPath, lackingPathAttr.type, lackingPathAttr.defaultValue);
1142 if (errCode != E_OK) { // Unlikely
1143 LOGE("[Schema][AmendValue] InsertField fail, errCode=%d, Path=%s, Type=%s.", errCode,
1144 SchemaUtils::FieldPathString(eachLackingPath).c_str(),
1145 SchemaUtils::FieldTypeString(lackingPathAttr.type).c_str());
1146 return -E_INTERNAL_ERROR;
1147 }
1148 amended = true;
1149 }
1150 return E_OK;
1151 }
1152 } // namespace DistributedDB
1153