/*
 * Copyright (C) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "util/hap_signing_block_utils.h"

#include <atomic>
#include <climits>
#include <parameters.h>
#include <thread>
#include <vector>

#include "algorithm"
#include "common/hap_byte_buffer_data_source.h"
#include "common/hap_file_data_source.h"
#include "common/hap_verify_log.h"
#include "openssl/evp.h"
#include "securec.h"
#include "util/hap_verify_openssl_utils.h"

namespace OHOS {
namespace Security {
namespace Verify {
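/*
 * HAP_SIG_BLOCK_MAGIC_LOW/HIGH spell "<hap sign block>" in little-endian ASCII;
 * the *_OLD values are the magic words used for versions below VERSION_FOR_NEW_MAGIC_NUM.
 */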
const long long HapSigningBlockUtils::HAP_SIG_BLOCK_MAGIC_LOW_OLD = 2334950737560224072LL;
const long long HapSigningBlockUtils::HAP_SIG_BLOCK_MAGIC_HIGH_OLD = 3617552046287187010LL;
const long long HapSigningBlockUtils::HAP_SIG_BLOCK_MAGIC_LOW = 7451613641622775868LL;
const long long HapSigningBlockUtils::HAP_SIG_BLOCK_MAGIC_HIGH = 4497797983070462062LL;

/* 1MB = 1024 * 1024 Bytes */
const long long HapSigningBlockUtils::CHUNK_SIZE = 1048576LL;
const long long HapSigningBlockUtils::SMALL_FILE_SIZE = CHUNK_SIZE * 2;

const int32_t HapSigningBlockUtils::HAP_SIG_BLOCK_MIN_SIZE = 32;
const int32_t HapSigningBlockUtils::ZIP_HEAD_OF_SIGNING_BLOCK_LENGTH = 32;

const int32_t HapSigningBlockUtils::ZIP_EOCD_SEG_MIN_SIZE = 22;
const int32_t HapSigningBlockUtils::ZIP_EOCD_SEGMENT_FLAG = 0x06054b50;
const int32_t HapSigningBlockUtils::ZIP_EOCD_COMMENT_LENGTH_OFFSET = 20;
const int32_t HapSigningBlockUtils::ZIP_CD_OFFSET_IN_EOCD = 16;
const int32_t HapSigningBlockUtils::ZIP_CD_SIZE_OFFSET_IN_EOCD = 12;
const int32_t HapSigningBlockUtils::ZIP_BLOCKS_NUM_NEED_DIGEST = 3;
const int32_t HapSigningBlockUtils::ZIP_UPDATE_DIGEST_THREADS_NUM = 4;

const char HapSigningBlockUtils::ZIP_FIRST_LEVEL_CHUNK_PREFIX = 0x5a;
const char HapSigningBlockUtils::ZIP_SECOND_LEVEL_CHUNK_PREFIX = 0xa5;

/*
 * A hap package is in ZIP format and contains four segments: the contents of the ZIP entries,
 * the hap signing block, the central directory and the end of central directory (EoCD).
 * This function locates the data segment of the hap signing block in the hap file.
 */
bool HapSigningBlockUtils::FindHapSignature(RandomAccessFile& hapFile, SignatureInfo& signInfo)
{
    std::pair<HapByteBuffer, long long> eocdAndOffsetInFile;
    if (!FindEocdInHap(hapFile, eocdAndOffsetInFile)) {
        HAPVERIFY_LOG_ERROR("find EoCD failed");
        return false;
    }

    signInfo.hapEocd = eocdAndOffsetInFile.first;
    signInfo.hapEocdOffset = eocdAndOffsetInFile.second;
    if (!GetCentralDirectoryOffset(signInfo.hapEocd, signInfo.hapEocdOffset, signInfo.hapCentralDirOffset)) {
        HAPVERIFY_LOG_ERROR("get CD offset failed");
        return false;
    }

    if (!FindHapSigningBlock(hapFile, signInfo.hapCentralDirOffset, signInfo)) {
        HAPVERIFY_LOG_ERROR("find signing block failed");
        return false;
    }
    return true;
}

bool HapSigningBlockUtils::FindEocdInHap(RandomAccessFile& hapFile, std::pair<HapByteBuffer, long long>& eocd)
{
    /*
     * The EoCD has an optional comment block, which most hap packages do not contain.
     * For hap packages without a comment block, the EoCD is the last 22 bytes of the file.
     * Try that case first to avoid unnecessarily reading more data.
     */
    if (FindEocdInHap(hapFile, 0, eocd)) {
        HAPVERIFY_LOG_DEBUG("Find EoCD of Zip file");
        return true;
    }
    /*
     * If the EoCD contains a comment block, search for it from the offset (fileLen - maxCommentSize - 22).
     * The maximum size of the comment block is 65535, because the comment length is an unsigned 16-bit number.
     */
    return FindEocdInHap(hapFile, USHRT_MAX, eocd);
}

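/*
 * Read the last (maxCommentSize + 22) bytes of the file and search them for the EoCD.
 * On success, eocd.first is a buffer positioned at the EoCD and eocd.second is its offset in the file.
 */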
bool HapSigningBlockUtils::FindEocdInHap(RandomAccessFile& hapFile, unsigned short maxCommentSize,
    std::pair<HapByteBuffer, long long>& eocd)
{
    long long fileLength = hapFile.GetLength();
    /* check whether the file has enough space for an EoCD. */
    if (fileLength < ZIP_EOCD_SEG_MIN_SIZE) {
        HAPVERIFY_LOG_ERROR("file length %{public}lld is too small", fileLength);
        return false;
    }

    int32_t searchRange = static_cast<int>(maxCommentSize) + ZIP_EOCD_SEG_MIN_SIZE;
    if (fileLength < static_cast<long long>(searchRange)) {
        searchRange = static_cast<int>(fileLength);
    }

    HapByteBuffer searchEocdBuffer(searchRange);
    long long searchRangeOffset = fileLength - searchEocdBuffer.GetCapacity();
    long long ret = hapFile.ReadFileFullyFromOffset(searchEocdBuffer, searchRangeOffset);
    if (ret < 0) {
        HAPVERIFY_LOG_ERROR("read data from hap file error: %{public}lld", ret);
        return false;
    }

    int32_t eocdOffsetInSearchBuffer = 0;
    if (!FindEocdInSearchBuffer(searchEocdBuffer, eocdOffsetInSearchBuffer)) {
        HAPVERIFY_LOG_ERROR("No Eocd is found");
        return false;
    }

    searchEocdBuffer.SetPosition(eocdOffsetInSearchBuffer);
    searchEocdBuffer.Slice();
    eocd.first = searchEocdBuffer;
    eocd.second = searchRangeOffset + eocdOffsetInSearchBuffer;
    return true;
}

/*
 * EoCD format:
 * 4 bytes: end of central directory flag
 * 2 bytes: number of this disk
 * 2 bytes: number of the disk with the start of the central directory
 * 2 bytes: total number of entries in the central directory on this disk
 * 2 bytes: total number of entries in the central directory
 * 4 bytes: size of the central directory
 * 4 bytes: offset of the central directory in the zip file
 * 2 bytes: ZIP file comment length, the value n is in the range [0, 65535]
 * n bytes: ZIP comment block data
 *
 * This function finds the EoCD by searching the input buffer (searchBuffer) for the EoCD flag and
 * making sure the comment length is equal to the expected value.
 */
bool HapSigningBlockUtils::FindEocdInSearchBuffer(HapByteBuffer& searchBuffer, int& offset)
{
    int32_t searchBufferSize = searchBuffer.GetCapacity();
    if (searchBufferSize < ZIP_EOCD_SEG_MIN_SIZE) {
        HAPVERIFY_LOG_ERROR("The size of searchBuffer %{public}d is smaller than min size of Eocd",
            searchBufferSize);
        return false;
    }

    int32_t currentOffset = searchBufferSize - ZIP_EOCD_SEG_MIN_SIZE;
    while (currentOffset >= 0) {
        int32_t hapEocdSegmentFlag;
        if (searchBuffer.GetInt32(currentOffset, hapEocdSegmentFlag) &&
            (hapEocdSegmentFlag == ZIP_EOCD_SEGMENT_FLAG)) {
            unsigned short commentLength;
            int32_t expectedCommentLength = searchBufferSize - ZIP_EOCD_SEG_MIN_SIZE - currentOffset;
            if (searchBuffer.GetUInt16(currentOffset + ZIP_EOCD_COMMENT_LENGTH_OFFSET, commentLength) &&
                static_cast<int>(commentLength) == expectedCommentLength) {
                offset = currentOffset;
                return true;
            }
        }
        currentOffset--;
    }
    return false;
}

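/*
 * Read the central directory offset and size from the EoCD and check that the central directory
 * ends exactly where the EoCD begins (centralDirOffset + centralDirSize == eocdOffset).
 */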
bool HapSigningBlockUtils::GetCentralDirectoryOffset(HapByteBuffer& eocd, long long eocdOffset,
    long long& centralDirectoryOffset)
{
    uint32_t offsetValue;
    uint32_t sizeValue;
    if (!eocd.GetUInt32(ZIP_CD_OFFSET_IN_EOCD, offsetValue) ||
        !eocd.GetUInt32(ZIP_CD_SIZE_OFFSET_IN_EOCD, sizeValue)) {
        HAPVERIFY_LOG_ERROR("GetUInt32 failed");
        return false;
    }

    centralDirectoryOffset = static_cast<long long>(offsetValue);
    if (centralDirectoryOffset > eocdOffset) {
        HAPVERIFY_LOG_ERROR("centralDirOffset %{public}lld is larger than eocdOffset %{public}lld",
            centralDirectoryOffset, eocdOffset);
        return false;
    }

    long long centralDirectorySize = static_cast<long long>(sizeValue);
    if (centralDirectoryOffset + centralDirectorySize != eocdOffset) {
        HAPVERIFY_LOG_ERROR("centralDirOffset %{public}lld add centralDirSize %{public}lld is not equal\
            to eocdOffset %{public}lld", centralDirectoryOffset, centralDirectorySize, eocdOffset);
        return false;
    }
    return true;
}

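/* Write value into buffer at offset as an unsigned 32-bit integer, rejecting values outside [0, UINT_MAX]. */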
bool HapSigningBlockUtils::SetUnsignedInt32(HapByteBuffer& buffer, int32_t offset, long long value)
{
    if ((value < 0) || (value > static_cast<long long>(UINT_MAX))) {
        HAPVERIFY_LOG_ERROR("uint32 value out of range: %{public}lld", value);
        return false;
    }
    buffer.PutInt32(offset, static_cast<int>(value));
    return true;
}

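/*
 * The 32-byte head of the hap signing block is stored immediately before the central directory.
 * Read and validate it, then parse the sub-block array it describes.
 */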
bool HapSigningBlockUtils::FindHapSigningBlock(RandomAccessFile& hapFile, long long centralDirOffset,
    SignatureInfo& signInfo)
{
    if (centralDirOffset < HAP_SIG_BLOCK_MIN_SIZE) {
        HAPVERIFY_LOG_ERROR("HAP too small for HAP Signing Block: %{public}lld", centralDirOffset);
        return false;
    }
    /*
     * read the hap signing block head, its format:
     * int32: blockCount
     * int64: size
     * 16 bytes: magic
     * int32: version
     */
    HapByteBuffer hapBlockHead(ZIP_HEAD_OF_SIGNING_BLOCK_LENGTH);
    long long ret = hapFile.ReadFileFullyFromOffset(hapBlockHead, centralDirOffset - hapBlockHead.GetCapacity());
    if (ret < 0) {
        HAPVERIFY_LOG_ERROR("read hapBlockHead error: %{public}lld", ret);
        return false;
    }
    HapSignBlockHead hapSignBlockHead;
    if (!ParseSignBlockHead(hapSignBlockHead, hapBlockHead)) {
        HAPVERIFY_LOG_ERROR("ParseSignBlockHead failed");
        return false;
    }

    if (!CheckSignBlockHead(hapSignBlockHead)) {
        HAPVERIFY_LOG_ERROR("hapSignBlockHead is invalid");
        return false;
    }

    signInfo.version = hapSignBlockHead.version;
    long long blockArrayLen = hapSignBlockHead.hapSignBlockSize - ZIP_HEAD_OF_SIGNING_BLOCK_LENGTH;
    long long hapSignBlockOffset = centralDirOffset - hapSignBlockHead.hapSignBlockSize;
    if (hapSignBlockOffset < 0) {
        HAPVERIFY_LOG_ERROR("HAP Signing Block offset out of range %{public}lld", hapSignBlockOffset);
        return false;
    }
    signInfo.hapSigningBlockOffset = hapSignBlockOffset;
    return FindHapSubSigningBlock(hapFile, hapSignBlockHead.blockCount, blockArrayLen, hapSignBlockOffset, signInfo);
}

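/*
 * Check the magic number (versions below VERSION_FOR_NEW_MAGIC_NUM use the old magic),
 * the signing block size and the sub-block count.
 */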
bool HapSigningBlockUtils::CheckSignBlockHead(const HapSignBlockHead& hapSignBlockHead)
{
    long long magic_low = HAP_SIG_BLOCK_MAGIC_LOW;
    long long magic_high = HAP_SIG_BLOCK_MAGIC_HIGH;
    if (hapSignBlockHead.version < VERSION_FOR_NEW_MAGIC_NUM) {
        magic_low = HAP_SIG_BLOCK_MAGIC_LOW_OLD;
        magic_high = HAP_SIG_BLOCK_MAGIC_HIGH_OLD;
    }

    if ((hapSignBlockHead.hapSignBlockMagicLo != magic_low) ||
        (hapSignBlockHead.hapSignBlockMagicHi != magic_high)) {
        HAPVERIFY_LOG_ERROR("No HAP Signing Block before ZIP Central Directory");
        return false;
    }

    if ((hapSignBlockHead.hapSignBlockSize < ZIP_HEAD_OF_SIGNING_BLOCK_LENGTH) ||
        (hapSignBlockHead.hapSignBlockSize > MAX_HAP_SIGN_BLOCK_SIZE)) {
        HAPVERIFY_LOG_ERROR("HAP Signing Block size out of range %{public}lld",
            hapSignBlockHead.hapSignBlockSize);
        return false;
    }

    if (hapSignBlockHead.blockCount > MAX_BLOCK_COUNT) {
        HAPVERIFY_LOG_ERROR("HAP Signing Block count out of range %{public}d", hapSignBlockHead.blockCount);
        return false;
    }

    return true;
}

bool HapSigningBlockUtils::ParseSignBlockHead(HapSignBlockHead& hapSignBlockHead, HapByteBuffer& hapBlockHead)
{
    return hapBlockHead.GetInt32(hapSignBlockHead.blockCount) &&
        hapBlockHead.GetInt64(hapSignBlockHead.hapSignBlockSize) &&
        hapBlockHead.GetInt64(hapSignBlockHead.hapSignBlockMagicLo) &&
        hapBlockHead.GetInt64(hapSignBlockHead.hapSignBlockMagicHi) &&
        hapBlockHead.GetInt32(hapSignBlockHead.version);
}

bool HapSigningBlockUtils::ParseSubSignBlockHead(HapSubSignBlockHead& subSignBlockHead, HapByteBuffer& hapBlockHead)
{
    return hapBlockHead.GetUInt32(subSignBlockHead.type) &&
        hapBlockHead.GetUInt32(subSignBlockHead.length) &&
        hapBlockHead.GetUInt32(subSignBlockHead.offset);
}

/*
 * Hap Sign Block Format:
 * HapSubSignBlock1_Head
 * HapSubSignBlock2_Head
 * ...
 * HapSubSignBlockn_Head
 * HapSubSignBlock1_data
 * HapSubSignBlock2_data
 * ...
 * HapSubSignBlockn_data
 * hap signing block head
 *
 * This function reads the heads of the HapSubSignBlocks, and then reads the corresponding data of
 * each block according to the offset provided by its head.
 */
bool HapSigningBlockUtils::FindHapSubSigningBlock(RandomAccessFile& hapFile, int32_t blockCount,
    long long blockArrayLen, long long hapSignBlockOffset, SignatureInfo& signInfo)
{
    long long offsetMax = hapSignBlockOffset + blockArrayLen;
    long long readLen = 0;
    long long readHeadOffset = hapSignBlockOffset;
    HAPVERIFY_LOG_DEBUG("hapSignBlockOffset %{public}lld blockArrayLen: %{public}lld blockCount: %{public}d",
        hapSignBlockOffset, blockArrayLen, blockCount);
    for (int32_t i = 0; i < blockCount; i++) {
        HapByteBuffer hapBlockHead(ZIP_CD_SIZE_OFFSET_IN_EOCD);
        long long ret = hapFile.ReadFileFullyFromOffset(hapBlockHead, readHeadOffset);
        if (ret < 0) {
            HAPVERIFY_LOG_ERROR("read hapBlockHead error: %{public}lld", ret);
            return false;
        }
        HapSubSignBlockHead subSignBlockHead;
        if (!ParseSubSignBlockHead(subSignBlockHead, hapBlockHead)) {
            HAPVERIFY_LOG_ERROR("ParseSubSignBlockHead failed");
            return false;
        }
        readLen += sizeof(HapSubSignBlockHead);

        readHeadOffset += sizeof(HapSubSignBlockHead);
        if (readHeadOffset > offsetMax) {
            HAPVERIFY_LOG_ERROR("find %{public}dst next head offset error", i);
            return false;
        }

        long long headOffset = static_cast<long long>(subSignBlockHead.offset);
        long long headLength = static_cast<long long>(subSignBlockHead.length);
        /* check subSignBlockHead */
        if ((offsetMax - headOffset) < hapSignBlockOffset) {
            HAPVERIFY_LOG_ERROR("Find %{public}dst subblock data offset error", i);
            return false;
        }
        if ((blockArrayLen - headLength) < readLen) {
            HAPVERIFY_LOG_ERROR("not enough data to be read for %{public}dst subblock", i);
            return false;
        }

        long long dataOffset = hapSignBlockOffset + headOffset;
        HapByteBuffer signBuffer(subSignBlockHead.length);
        ret = hapFile.ReadFileFullyFromOffset(signBuffer, dataOffset);
        if (ret < 0) {
            HAPVERIFY_LOG_ERROR("read %{public}dst subblock error: %{public}lld", i, ret);
            return false;
        }
        readLen += headLength;

        if (!ClassifyHapSubSigningBlock(signInfo, signBuffer, subSignBlockHead.type)) {
            HAPVERIFY_LOG_ERROR("ClassifyHapSubSigningBlock error, type is %{public}d",
                subSignBlockHead.type);
            return false;
        }
    }

    /* the size of the block array must be equal to the sum of all sub-block lengths */
    if (readLen != blockArrayLen) {
        HAPVERIFY_LOG_ERROR("readLen: %{public}lld is not same as blockArrayLen: %{public}lld",
            readLen, blockArrayLen);
        return false;
    }
    return true;
}

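/*
 * Store a sub-block according to its type: the signature block (at most one is allowed) goes to
 * signInfo.hapSignatureBlock, while profile, proof-of-rotation and property blocks are collected
 * as optional blocks. Unknown types are rejected.
 */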
bool HapSigningBlockUtils::ClassifyHapSubSigningBlock(SignatureInfo& signInfo,
    const HapByteBuffer& subBlock, uint32_t type)
{
    bool ret = false;
    switch (type) {
        case HAP_SIGN_BLOB: {
            if (signInfo.hapSignatureBlock.GetCapacity() != 0) {
                HAPVERIFY_LOG_ERROR("find more than one hap sign block");
                break;
            }
            signInfo.hapSignatureBlock = subBlock;
            ret = true;
            break;
        }
        case PROFILE_BLOB:
        case PROOF_ROTATION_BLOB:
        case PROPERTY_BLOB: {
            OptionalBlock optionalBlock;
            optionalBlock.optionalType = static_cast<int>(type);
            optionalBlock.optionalBlockValue = subBlock;
            signInfo.optionBlocks.push_back(optionalBlock);
            ret = true;
            break;
        }
        default:
            break;
    }
    return ret;
}

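/* Return the index of the first optional block whose optionalType matches the given type. */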
bool HapSigningBlockUtils::GetOptionalBlockIndex(std::vector<OptionalBlock>& optionBlocks, int32_t type, int& index)
{
    int32_t len = static_cast<int>(optionBlocks.size());
    for (int32_t i = 0; i < len; i++) {
        if (optionBlocks[i].optionalType == type) {
            index = i;
            return true;
        }
    }
    return false;
}

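/*
 * Recompute the chunked digests over the three digested segments (contents zip, central directory
 * and EoCD) and compare the final digest with the one carried in the signature (digestInfo.content).
 * The CD offset field inside the EoCD is rewritten to the signing block offset first, which is
 * presumably the value the EoCD held before the signing block was inserted at signing time.
 * For large files the contents zip may be digested by several threads in parallel.
 */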
bool HapSigningBlockUtils::VerifyHapIntegrity(
    Pkcs7Context& digestInfo, RandomAccessFile& hapFile, SignatureInfo& signInfo)
{
    if (!SetUnsignedInt32(signInfo.hapEocd, ZIP_CD_OFFSET_IN_EOCD, signInfo.hapSigningBlockOffset)) {
        HAPVERIFY_LOG_ERROR("Set central dir offset failed");
        return false;
    }

    long long contentsZipSize = signInfo.hapSigningBlockOffset;
    long long centralDirSize = signInfo.hapEocdOffset - signInfo.hapCentralDirOffset;
    HapFileDataSource contentsZip(hapFile, 0, contentsZipSize, 0);
    HapFileDataSource centralDir(hapFile, signInfo.hapCentralDirOffset, centralDirSize, 0);
    HapByteBufferDataSource eocd(signInfo.hapEocd);
    DataSource* content[ZIP_BLOCKS_NUM_NEED_DIGEST] = { &contentsZip, &centralDir, &eocd };
    int32_t nId = HapVerifyOpensslUtils::GetDigestAlgorithmId(digestInfo.digestAlgorithm);
    DigestParameter digestParam = GetDigestParameter(nId);
    HapByteBuffer chunkDigest;
    int32_t chunkCount = 0;
    int32_t sumOfChunksLen = 0;
    if (!GetSumOfChunkDigestLen(content, ZIP_BLOCKS_NUM_NEED_DIGEST, digestParam.digestOutputSizeBytes,
        chunkCount, sumOfChunksLen)) {
        HAPVERIFY_LOG_ERROR("GetSumOfChunkDigestLen failed");
        return false;
    }
    chunkDigest.SetCapacity(sumOfChunksLen);
    chunkDigest.PutByte(0, ZIP_FIRST_LEVEL_CHUNK_PREFIX);
    chunkDigest.PutInt32(1, chunkCount);
    if (!HapVerifyParallelizationSupported() || contentsZipSize <= SMALL_FILE_SIZE) {
        // No parallelization for small files (<= 2MB).
        int32_t offset = ZIP_CHUNK_DIGEST_PRIFIX_LEN;
        if (!ComputeDigestsForDataSourceArray(digestParam, content, ZIP_BLOCKS_NUM_NEED_DIGEST, chunkDigest, offset)) {
            HAPVERIFY_LOG_ERROR("Compute Content Digests failed, alg: %{public}d", nId);
            return false;
        }
    } else {
        // Compute digests of the contents zip in parallel.
        int32_t contentsZipChunkCount = GetChunkCount(contentsZipSize, CHUNK_SIZE);
        if (!ComputeDigestsForContentsZip(nId, hapFile, contentsZipChunkCount, contentsZipSize, chunkDigest)) {
            HAPVERIFY_LOG_ERROR("ComputeDigestsForContentsZip failed, alg: %{public}d", nId);
            return false;
        }
        // Compute digests of the other contents.
        int32_t offset = ZIP_CHUNK_DIGEST_PRIFIX_LEN + contentsZipChunkCount * digestParam.digestOutputSizeBytes;
        if (!ComputeDigestsForDataSourceArray(digestParam, content + 1,
            ZIP_BLOCKS_NUM_NEED_DIGEST - 1, chunkDigest, offset)) {
            HAPVERIFY_LOG_ERROR("Compute Content Digests failed, alg: %{public}d", nId);
            return false;
        }
    }

    return VerifyDigest(digestParam, nId, signInfo.optionBlocks, chunkDigest, digestInfo);
}

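/* Compute the final digest over the chunk digests and optional blocks, then compare it with the digest in the PKCS7 context. */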
bool HapSigningBlockUtils::VerifyDigest(const DigestParameter& digestParam, const int32_t nId,
    const std::vector<OptionalBlock>& optionalBlocks, const HapByteBuffer& chunkDigest, Pkcs7Context& digestInfo)
{
    HapByteBuffer actualDigest;
    if (!ComputeDigestsWithOptionalBlock(digestParam, optionalBlocks, chunkDigest, actualDigest)) {
        HAPVERIFY_LOG_ERROR("Compute Final Digests failed, alg: %{public}d", nId);
        return false;
    }

    if (!digestInfo.content.IsEqual(actualDigest)) {
        HAPVERIFY_LOG_ERROR("digest of contents verify failed, alg %{public}d", nId);
        return false;
    }
    return true;
}

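/*
 * Compute the top-level digest over the chunk-digest buffer together with the optional blocks
 * and write it into finalDigest.
 */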
bool HapSigningBlockUtils::ComputeDigestsWithOptionalBlock(const DigestParameter& digestParam,
    const std::vector<OptionalBlock>& optionalBlocks, const HapByteBuffer& chunkDigest, HapByteBuffer& finalDigest)
{
    unsigned char out[EVP_MAX_MD_SIZE];
    int32_t digestLen = HapVerifyOpensslUtils::GetDigest(chunkDigest, optionalBlocks, digestParam, out);
    if (digestLen != digestParam.digestOutputSizeBytes) {
        HAPVERIFY_LOG_ERROR("GetDigest failed, outLen is not right, %{public}u, %{public}d",
            digestLen, digestParam.digestOutputSizeBytes);
        return false;
    }

    finalDigest.SetCapacity(digestParam.digestOutputSizeBytes);
    finalDigest.PutData(0, reinterpret_cast<char*>(out), digestParam.digestOutputSizeBytes);
    return true;
}

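/*
 * Count the 1MB chunks of all data sources and compute the size of the chunk-digest buffer:
 * ZIP_CHUNK_DIGEST_PRIFIX_LEN + chunkCount * chunkDigestLen, guarding against integer overflow.
 */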
bool HapSigningBlockUtils::GetSumOfChunkDigestLen(DataSource* contents[], int32_t len,
    int32_t chunkDigestLen, int& chunkCount, int& sumOfChunkDigestLen)
{
    for (int32_t i = 0; i < len; i++) {
        if (contents[i] == nullptr) {
            HAPVERIFY_LOG_ERROR("contents[%{public}d] is nullptr", i);
            return false;
        }
        contents[i]->Reset();
        chunkCount += GetChunkCount(contents[i]->Remaining(), CHUNK_SIZE);
    }

    if (chunkCount <= 0) {
        HAPVERIFY_LOG_ERROR("no content for digest");
        return false;
    }

    if (chunkDigestLen < 0 || ((INT_MAX - ZIP_CHUNK_DIGEST_PRIFIX_LEN) / chunkCount) < chunkDigestLen) {
        HAPVERIFY_LOG_ERROR("overflow chunkCount: %{public}d, chunkDigestLen: %{public}d",
            chunkCount, chunkDigestLen);
        return false;
    }

    sumOfChunkDigestLen = ZIP_CHUNK_DIGEST_PRIFIX_LEN + chunkCount * chunkDigestLen;
    return true;
}

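/*
 * Digest the contents zip with ZIP_UPDATE_DIGEST_THREADS_NUM worker threads. The chunks are split
 * into consecutive ranges of chunkNumToUpdate chunks; each thread digests its own range and writes
 * its chunk digests at its own fixed offset in digestsBuffer, so the threads touch disjoint regions.
 */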
bool HapSigningBlockUtils::ComputeDigestsForContentsZip(int32_t nId, RandomAccessFile& hapFile, int32_t chunkNum,
    long long contentsZipSize, HapByteBuffer& digestsBuffer)
{
    int32_t chunkNumToUpdate = (chunkNum + ZIP_UPDATE_DIGEST_THREADS_NUM - 1) / ZIP_UPDATE_DIGEST_THREADS_NUM;
    std::vector<std::thread> threads;
    std::vector<std::atomic<bool>> results(ZIP_UPDATE_DIGEST_THREADS_NUM);
    for (int32_t i = 0; i < ZIP_UPDATE_DIGEST_THREADS_NUM; i++) {
        results[i].store(false, std::memory_order_seq_cst);
    }

    for (int32_t i = 0; i < ZIP_UPDATE_DIGEST_THREADS_NUM; i++) {
        threads.emplace_back([&results, &digestsBuffer, &hapFile, i, nId, chunkNumToUpdate, contentsZipSize]() {
            long long fileBeginPosition = CHUNK_SIZE * chunkNumToUpdate * i;
            long long fileEndPosition = std::min(CHUNK_SIZE * chunkNumToUpdate * (i + 1), contentsZipSize);
            long long fileSize = fileEndPosition - fileBeginPosition;
            if (fileSize <= 0) {
                results[i].store(true, std::memory_order_seq_cst);
                return;
            }
            HapFileDataSource hapDataChunk(hapFile, fileBeginPosition, fileSize, 0);
            DigestParameter digestParam = GetDigestParameter(nId);
            int32_t digestOffset =
                ZIP_CHUNK_DIGEST_PRIFIX_LEN + chunkNumToUpdate * digestParam.digestOutputSizeBytes * i;
            results[i].store(
                ComputeDigestsForDataSource(digestParam, &hapDataChunk, digestsBuffer, digestOffset),
                std::memory_order_seq_cst);
        });
    }

    for (auto& thread : threads) {
        thread.join();
    }

    for (const auto& atomicResult : results) {
        if (!atomicResult.load(std::memory_order_seq_cst)) {
            HAPVERIFY_LOG_ERROR("Compute digests failed");
            return false;
        }
    }

    return true;
}

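/*
 * Digest a data source chunk by chunk: each 1MB chunk is hashed as
 * (ZIP_SECOND_LEVEL_CHUNK_PREFIX || uint32 chunk length || chunk data), and the chunk digests are
 * appended to result starting at offset, which is advanced accordingly.
 */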
bool HapSigningBlockUtils::ComputeDigestsForDataSource(const DigestParameter& digestParam, DataSource* content,
    HapByteBuffer& result, int32_t& offset)
{
    unsigned char out[EVP_MAX_MD_SIZE];
    unsigned char chunkContentPrefix[ZIP_CHUNK_DIGEST_PRIFIX_LEN] = {ZIP_SECOND_LEVEL_CHUNK_PREFIX, 0, 0, 0, 0};
    while (content->HasRemaining()) {
        int32_t chunkSize = std::min(content->Remaining(), CHUNK_SIZE);
        if (!InitDigestPrefix(digestParam, chunkContentPrefix, chunkSize)) {
            HAPVERIFY_LOG_ERROR("InitDigestPrefix failed");
            return false;
        }

        if (!content->ReadDataAndDigestUpdate(digestParam, chunkSize)) {
            HAPVERIFY_LOG_ERROR("Copy Partial Buffer failed");
            return false;
        }

        int32_t digestLen = HapVerifyOpensslUtils::GetDigest(digestParam, out);
        if (digestLen != digestParam.digestOutputSizeBytes) {
            HAPVERIFY_LOG_ERROR("GetDigest failed len: %{public}d digestSizeBytes: %{public}d",
                digestLen, digestParam.digestOutputSizeBytes);
            return false;
        }
        result.PutData(offset, reinterpret_cast<char*>(out), digestParam.digestOutputSizeBytes);
        offset += digestLen;
    }
    return true;
}

bool HapSigningBlockUtils::ComputeDigestsForDataSourceArray(const DigestParameter& digestParam,
    DataSource* contents[], int32_t len, HapByteBuffer& result, int32_t offset)
{
    for (int32_t i = 0; i < len; i++) {
        if (!ComputeDigestsForDataSource(digestParam, contents[i], result, offset)) {
            HAPVERIFY_LOG_ERROR("Compute digest failed");
            return false;
        }
    }
    return true;
}

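/* Build a DigestParameter for the given digest nid: output size, EVP_MD and an initialized EVP_MD_CTX. */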
DigestParameter HapSigningBlockUtils::GetDigestParameter(int32_t nId)
{
    DigestParameter digestParam;
    digestParam.digestOutputSizeBytes = HapVerifyOpensslUtils::GetDigestAlgorithmOutputSizeBytes(nId);
    digestParam.md = EVP_get_digestbynid(nId);
    digestParam.ptrCtx = EVP_MD_CTX_create();
    EVP_MD_CTX_init(digestParam.ptrCtx);
    return digestParam;
}

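/* Return ceil(inputSize / chunkSize); return 0 if chunkSize is invalid or the result would overflow. */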
int32_t HapSigningBlockUtils::GetChunkCount(long long inputSize, long long chunkSize)
{
    if (chunkSize <= 0 || inputSize > LLONG_MAX - chunkSize) {
        return 0;
    }

    long long res = (inputSize + chunkSize - 1) / chunkSize;
    if (res > INT_MAX || res < 0) {
        return 0;
    }
    return static_cast<int>(res);
}

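/*
 * Initialize the digest context and feed it the 5-byte chunk prefix:
 * ZIP_SECOND_LEVEL_CHUNK_PREFIX followed by the 4-byte chunk length.
 */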
bool HapSigningBlockUtils::InitDigestPrefix(const DigestParameter& digestParam,
    unsigned char (&chunkContentPrefix)[ZIP_CHUNK_DIGEST_PRIFIX_LEN], int32_t chunkLen)
{
    if (memcpy_s((chunkContentPrefix + 1), ZIP_CHUNK_DIGEST_PRIFIX_LEN - 1, (&chunkLen), sizeof(chunkLen)) != EOK) {
        HAPVERIFY_LOG_ERROR("memcpy_s failed");
        return false;
    }

    if (!HapVerifyOpensslUtils::DigestInit(digestParam)) {
        HAPVERIFY_LOG_ERROR("DigestInit failed");
        return false;
    }

    if (!HapVerifyOpensslUtils::DigestUpdate(digestParam, chunkContentPrefix, ZIP_CHUNK_DIGEST_PRIFIX_LEN)) {
        HAPVERIFY_LOG_ERROR("DigestUpdate failed");
        return false;
    }
    return true;
}

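/* Parallel digest computation is controlled by the system parameter const.appverify.hap_verify_parallel (default false). */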
bool HapSigningBlockUtils::HapVerifyParallelizationSupported()
{
    return OHOS::system::GetBoolParameter("const.appverify.hap_verify_parallel", false);
}
} // namespace Verify
} // namespace Security
} // namespace OHOS