/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.android.apksig.internal.util;

import static java.util.concurrent.TimeUnit.MILLISECONDS;

import com.android.apksig.internal.zip.ZipUtils;
import com.android.apksig.util.DataSink;
import com.android.apksig.util.DataSource;
import com.android.apksig.util.DataSources;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

import java.util.ArrayList;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Phaser;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/**
 * VerityTreeBuilder is used to generate the root hash of the verity tree built from the input
 * file.  The root hash can be used on device for on-access verification.  The tree itself is
 * reproducible on device, and is not shipped with the APK.
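 *
 * <p>A minimal usage sketch (hypothetical caller; a {@code null} salt disables salting):
 * <pre>{@code
 * try (VerityTreeBuilder builder = new VerityTreeBuilder(null)) {
 *     byte[] rootHash = builder.generateVerityTreeRootHash(fileSource);  // fileSource: any DataSource
 * }
 * }</pre>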
 */
public class VerityTreeBuilder implements AutoCloseable {

    /**
     * Maximum size (in bytes) of each node of the tree.
     */
    private static final int CHUNK_SIZE = 4096;
    /**
     * Maximum parallelism while calculating digests.
     */
    private static final int DIGEST_PARALLELISM = Math.min(32,
            Runtime.getRuntime().availableProcessors());
    /**
     * Queue size.
     */
    private static final int MAX_OUTSTANDING_CHUNKS = 4;
    /**
     * Typical prefetch size.
     */
    private static final int MAX_PREFETCH_CHUNKS = 1024;
    /**
     * Minimum chunks to be processed by a single worker task.
     */
    private static final int MIN_CHUNKS_PER_WORKER = 8;

    /**
     * Digest algorithm (JCA Digest algorithm name) used in the tree.
     */
    private static final String JCA_ALGORITHM = "SHA-256";

    /**
     * Optional salt to apply before each digestion.
     */
    private final byte[] mSalt;

    private final MessageDigest mMd;

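    // Note: the bounded queue below, together with CallerRunsPolicy, throttles the reader: once
    // MAX_OUTSTANDING_CHUNKS read buffers are queued, the submitting thread digests the next
    // buffer itself instead of reading further ahead.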
    private final ExecutorService mExecutor =
            new ThreadPoolExecutor(DIGEST_PARALLELISM, DIGEST_PARALLELISM,
                    0L, MILLISECONDS,
                    new ArrayBlockingQueue<>(MAX_OUTSTANDING_CHUNKS),
                    new ThreadPoolExecutor.CallerRunsPolicy());

    public VerityTreeBuilder(byte[] salt) throws NoSuchAlgorithmException {
        mSalt = salt;
        mMd = getNewMessageDigest();
    }

    @Override
    public void close() {
        mExecutor.shutdownNow();
    }

    /**
     * Returns the root hash of the APK verity tree built from ZIP blocks.
     *
     * Specifically, the APK verity tree is built from the APK, but as if the APK Signing Block
     * (which must be page aligned) and the "Central Directory offset" field in the End of Central
     * Directory record are skipped.
     */
    public byte[] generateVerityTreeRootHash(DataSource beforeApkSigningBlock,
            DataSource centralDir, DataSource eocd) throws IOException {
        if (beforeApkSigningBlock.size() % CHUNK_SIZE != 0) {
            throw new IllegalStateException("APK Signing Block size not a multiple of " + CHUNK_SIZE
                    + ": " + beforeApkSigningBlock.size());
        }

        // Ensure that, when digesting, the ZIP End of Central Directory record's Central Directory
        // offset field is treated as pointing to the offset at which the APK Signing Block will
        // start.
        long centralDirOffsetForDigesting = beforeApkSigningBlock.size();
        ByteBuffer eocdBuf = ByteBuffer.allocate((int) eocd.size());
        eocdBuf.order(ByteOrder.LITTLE_ENDIAN);
        eocd.copyTo(0, (int) eocd.size(), eocdBuf);
        eocdBuf.flip();
        ZipUtils.setZipEocdCentralDirectoryOffset(eocdBuf, centralDirOffsetForDigesting);

        return generateVerityTreeRootHash(new ChainedDataSource(beforeApkSigningBlock, centralDir,
                    DataSources.asDataSource(eocdBuf)));
    }

    /**
     * Returns the root hash of the verity tree built from the data source.
     */
    public byte[] generateVerityTreeRootHash(DataSource fileSource) throws IOException {
        ByteBuffer verityBuffer = generateVerityTree(fileSource);
        return getRootHashFromTree(verityBuffer);
    }

    /**
     * Returns the byte buffer that contains the whole verity tree.
     *
     * The tree is built bottom up.  The bottom level has a 256-bit digest for each 4 KB block in
     * the input file.  If the total size is larger than 4 KB, take this level as input and repeat
     * the same procedure, until the level fits within 4 KB.  If a salt is given, it is prepended
     * to each block before digesting.
     *
     * The root hash is then calculated from the last level's single 4 KB chunk, again with the
     * salt applied.
     *
     * The tree is currently stored only in memory and is never written out.  Nevertheless, it is
     * in the actual verity tree format on disk, and is supposed to be re-generated on device.
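     *
     * <p>As a worked example (assuming 32-byte SHA-256 digests): a 1 MiB input has 256 chunks of
     * 4 KB, so the bottom level holds 256 * 32 = 8192 bytes, i.e. two 4 KB pages.  Digesting those
     * two pages yields 2 * 32 = 64 bytes, which fits in a single top-level page, so the tree has
     * two levels.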
     */
    public ByteBuffer generateVerityTree(DataSource fileSource) throws IOException {
        int digestSize = mMd.getDigestLength();

        // Calculate the summed area table of level sizes. In other words, this is the offset
        // table of each level, plus the next non-existing level.
        int[] levelOffset = calculateLevelOffset(fileSource.size(), digestSize);

        ByteBuffer verityBuffer = ByteBuffer.allocate(levelOffset[levelOffset.length - 1]);

        // Generate the hash tree bottom-up.
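        // Levels are stored top-down in the buffer: level i occupies [levelOffset[i],
        // levelOffset[i + 1]), with the top level at offset 0, so each iteration writes one level
        // while reading from the already-computed level below it.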
        for (int i = levelOffset.length - 2; i >= 0; i--) {
            DataSink middleBufferSink = new ByteBufferSink(
                    slice(verityBuffer, levelOffset[i], levelOffset[i + 1]));
            DataSource src;
            if (i == levelOffset.length - 2) {
                src = fileSource;
                digestDataByChunks(src, middleBufferSink);
            } else {
                src = DataSources.asDataSource(slice(verityBuffer.asReadOnlyBuffer(),
                        levelOffset[i + 1], levelOffset[i + 2]));
                digestDataByChunks(src, middleBufferSink);
            }

            // If the output is not a full chunk, pad it with 0s.
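            // (Each level must occupy whole 4 KB pages, since the level above digests it page by
            // page; calculateLevelOffset sizes the regions accordingly.)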
            long totalOutput = divideRoundup(src.size(), CHUNK_SIZE) * digestSize;
            int incomplete = (int) (totalOutput % CHUNK_SIZE);
            if (incomplete > 0) {
                byte[] padding = new byte[CHUNK_SIZE - incomplete];
                middleBufferSink.consume(padding, 0, padding.length);
            }
        }
        return verityBuffer;
    }

    /**
     * Returns the digested root hash from the top level (only page) of a verity tree.
     */
    public byte[] getRootHashFromTree(ByteBuffer verityBuffer) throws IOException {
        ByteBuffer firstPage = slice(verityBuffer.asReadOnlyBuffer(), 0, CHUNK_SIZE);
        return saltedDigest(firstPage);
    }

    /**
     * Returns an array that is the summed area table of the level sizes in the verity tree.  In
     * other words, the returned array holds the offset of each level in the verity tree file
     * format, plus an additional offset of the next non-existing level (i.e. end of the last
     * level + 1).  Thus the array size is the number of levels + 1.
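     *
     * <p>For example (assuming 32-byte SHA-256 digests), dataSize = 1 MiB gives levelSize
     * [8192, 4096] bottom-up, which reversed and summed becomes levelOffset [0, 4096, 12288]:
     * the top level at [0, 4096) and the bottom level at [4096, 12288).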
     */
    private static int[] calculateLevelOffset(long dataSize, int digestSize) {
        // Compute the total size of each level, bottom to top.
        ArrayList<Long> levelSize = new ArrayList<>();
        while (true) {
            long chunkCount = divideRoundup(dataSize, CHUNK_SIZE);
            long size = CHUNK_SIZE * divideRoundup(chunkCount * digestSize, CHUNK_SIZE);
            levelSize.add(size);
            if (chunkCount * digestSize <= CHUNK_SIZE) {
                break;
            }
            dataSize = chunkCount * digestSize;
        }

        // Reverse and convert to a summed area table.
        int[] levelOffset = new int[levelSize.size() + 1];
        levelOffset[0] = 0;
        for (int i = 0; i < levelSize.size(); i++) {
            // We don't support a verity tree larger than Integer.MAX_VALUE.
            levelOffset[i + 1] = levelOffset[i] + Math.toIntExact(
                    levelSize.get(levelSize.size() - i - 1));
        }
        return levelOffset;
    }

    /**
     * Digests the data source by chunks and feeds them to the sink one by one.  If the last unit
     * is less than the chunk size and padding is desired, feed it with extra padding 0s to fill
     * up the chunk before digesting.
     */
    private void digestDataByChunks(DataSource dataSource, DataSink dataSink) throws IOException {
        final long size = dataSource.size();
        final int chunks = (int) divideRoundup(size, CHUNK_SIZE);

        // Single IO operation size, in chunks.
        final int ioSizeChunks = MAX_PREFETCH_CHUNKS;

        final byte[][] hashes = new byte[chunks][];

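        // The main thread registers as the sole initial party; each digest task registers on
        // submit and deregisters on completion, so arriveAndAwaitAdvance() below blocks until all
        // outstanding tasks are done.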
        Phaser tasks = new Phaser(1);

        // Reading the input file as fast as we can.
        final long maxReadSize = ioSizeChunks * CHUNK_SIZE;

        long readOffset = 0;
        int startChunkIndex = 0;
        while (readOffset < size) {
            final long readLimit = Math.min(readOffset + maxReadSize, size);
            final int readSize = (int) (readLimit - readOffset);
            final int bufferSizeChunks = (int) divideRoundup(readSize, CHUNK_SIZE);

            // Overallocating to zero-pad the last chunk.
            // With a 4 MiB read size, 32 threads and a queue size of 4 we might allocate up to
            // 144 MiB.
            final ByteBuffer buffer = ByteBuffer.allocate(bufferSizeChunks * CHUNK_SIZE);
            dataSource.copyTo(readOffset, readSize, buffer);
            buffer.rewind();

            final int readChunkIndex = startChunkIndex;
            Runnable task = () -> {
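                // Each task clones the template digest: MessageDigest instances are not
                // thread-safe, so workers must not share mMd.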
                final MessageDigest md = cloneMessageDigest();
                for (int offset = 0, finish = buffer.capacity(), chunkIndex = readChunkIndex;
                        offset < finish; offset += CHUNK_SIZE, ++chunkIndex) {
                    ByteBuffer chunk = slice(buffer, offset, offset + CHUNK_SIZE);
                    hashes[chunkIndex] = saltedDigest(md, chunk);
                }
                tasks.arriveAndDeregister();
            };
            tasks.register();
            mExecutor.execute(task);

            startChunkIndex += bufferSizeChunks;
            readOffset += readSize;
        }

        // Waiting for the tasks to complete.
        tasks.arriveAndAwaitAdvance();

        // Streaming hashes back.
        for (byte[] hash : hashes) {
            dataSink.consume(hash, 0, hash.length);
        }
    }

    /** Returns the digest of data with salt prepended. */
    private byte[] saltedDigest(ByteBuffer data) {
        return saltedDigest(mMd, data);
    }

    private byte[] saltedDigest(MessageDigest md, ByteBuffer data) {
        md.reset();
        if (mSalt != null) {
            md.update(mSalt);
        }
        md.update(data);
        return md.digest();
    }

    /** Divides a number and rounds up to the closest integer. */
    private static long divideRoundup(long dividend, long divisor) {
        return (dividend + divisor - 1) / divisor;
    }

    /** Returns a slice of the buffer that shares its content. */
    private static ByteBuffer slice(ByteBuffer buffer, int begin, int end) {
        ByteBuffer b = buffer.duplicate();
        b.position(0);  // to ensure position <= limit invariant.
        b.limit(end);
        b.position(begin);
        return b.slice();
    }

    /**
     * Obtains a new instance of the message digest algorithm.
     */
    private static MessageDigest getNewMessageDigest() throws NoSuchAlgorithmException {
        return MessageDigest.getInstance(JCA_ALGORITHM);
    }

    /**
     * Clones the existing message digest, or creates a new instance if clone is unavailable.
     */
    private MessageDigest cloneMessageDigest() {
        try {
            return (MessageDigest) mMd.clone();
        } catch (CloneNotSupportedException ignored) {
            try {
                return getNewMessageDigest();
            } catch (NoSuchAlgorithmException e) {
                throw new IllegalStateException(
                        "Failed to obtain an instance of a previously available message digest", e);
            }
        }
    }
}