/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.android.apksig.internal.util;

import static java.util.concurrent.TimeUnit.MILLISECONDS;

import com.android.apksig.internal.zip.ZipUtils;
import com.android.apksig.util.DataSink;
import com.android.apksig.util.DataSource;
import com.android.apksig.util.DataSources;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Phaser;
import java.util.concurrent.ThreadPoolExecutor;

/**
 * VerityTreeBuilder is used to generate the root hash of the verity tree built from the input
 * file.  The root hash can be used on device for on-access verification.  The tree itself is
 * reproducible on device, and is not shipped with the APK.
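 *
 * <p>A minimal usage sketch (illustrative only; {@code apkDataSource} is a hypothetical
 * variable standing for any {@link DataSource} over the input, e.g. obtained via
 * {@link DataSources#asDataSource}):
 * <pre>{@code
 * try (VerityTreeBuilder builder = new VerityTreeBuilder(null)) {  // null = no salt
 *     byte[] rootHash = builder.generateVerityTreeRootHash(apkDataSource);
 * }
 * }</pre>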
 */
public class VerityTreeBuilder implements AutoCloseable {

    /**
     * Maximum size (in bytes) of each node of the tree.
     */
    private static final int CHUNK_SIZE = 4096;
    /**
     * Maximum parallelism while calculating digests.
     */
    private static final int DIGEST_PARALLELISM = Math.min(32,
            Runtime.getRuntime().availableProcessors());
    /**
     * Maximum number of outstanding chunk-digest tasks queued on the executor at once.
     */
    private static final int MAX_OUTSTANDING_CHUNKS = 4;
    /**
     * Typical prefetch size, in chunks, of a single read from the data source.
     */
    private static final int MAX_PREFETCH_CHUNKS = 1024;

    /**
     * Digest algorithm (JCA Digest algorithm name) used in the tree.
     */
    private static final String JCA_ALGORITHM = "SHA-256";

    /**
     * Optional salt to apply before each digestion.
     */
    private final byte[] mSalt;

    private final MessageDigest mMd;

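    // The bounded queue together with CallerRunsPolicy throttles submission: when all worker
    // threads are busy and the queue is full, the submitting (reader) thread runs the digest
    // task itself instead of buffering more chunks in memory.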
    private final ExecutorService mExecutor =
            new ThreadPoolExecutor(DIGEST_PARALLELISM, DIGEST_PARALLELISM,
                    0L, MILLISECONDS,
                    new ArrayBlockingQueue<>(MAX_OUTSTANDING_CHUNKS),
                    new ThreadPoolExecutor.CallerRunsPolicy());

    public VerityTreeBuilder(byte[] salt) throws NoSuchAlgorithmException {
        mSalt = salt;
        mMd = getNewMessageDigest();
    }

    @Override
    public void close() {
        mExecutor.shutdownNow();
    }

    /**
     * Returns the root hash of the APK verity tree built from ZIP blocks.
     *
     * Specifically, the APK verity tree is built from the APK, but as if the APK Signing Block
     * (which must be page aligned) and the "Central Directory offset" field in the End of Central
     * Directory record were skipped.
     */
    public byte[] generateVerityTreeRootHash(DataSource beforeApkSigningBlock,
            DataSource centralDir, DataSource eocd) throws IOException {
        if (beforeApkSigningBlock.size() % CHUNK_SIZE != 0) {
            throw new IllegalStateException("APK Signing Block size not a multiple of " + CHUNK_SIZE
                    + ": " + beforeApkSigningBlock.size());
        }

        // Ensure that, when digesting, ZIP End of Central Directory record's Central Directory
        // offset field is treated as pointing to the offset at which the APK Signing Block will
        // start.
        long centralDirOffsetForDigesting = beforeApkSigningBlock.size();
        ByteBuffer eocdBuf = ByteBuffer.allocate((int) eocd.size());
        eocdBuf.order(ByteOrder.LITTLE_ENDIAN);
        eocd.copyTo(0, (int) eocd.size(), eocdBuf);
        eocdBuf.flip();
        ZipUtils.setZipEocdCentralDirectoryOffset(eocdBuf, centralDirOffsetForDigesting);

        return generateVerityTreeRootHash(new ChainedDataSource(beforeApkSigningBlock, centralDir,
                    DataSources.asDataSource(eocdBuf)));
    }

    /**
     * Returns the root hash of the verity tree built from the data source.
     */
    public byte[] generateVerityTreeRootHash(DataSource fileSource) throws IOException {
        ByteBuffer verityBuffer = generateVerityTree(fileSource);
        return getRootHashFromTree(verityBuffer);
    }

    /**
     * Returns the byte buffer that contains the whole verity tree.
     *
     * The tree is built bottom up.  The bottom level holds a 256-bit digest for each 4 KB block
     * of the input file.  If the total size is larger than 4 KB, that level is taken as input and
     * the procedure repeats, until a level fits within 4 KB.  If a salt is given, it is prepended
     * to the data of each digestion.
     *
     * The root hash is calculated from the last level's 4 KB chunk, again with the salt applied.
     *
     * The tree is currently stored only in memory and is never written out.  Nevertheless, it is
     * in the actual on-disk verity tree format, and is supposed to be re-generated on device.
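     *
     * <p>Worked example (illustrative, assuming 32-byte SHA-256 digests): a 1 MB input has 256
     * 4 KB chunks, whose digests occupy 8 KB and form the bottom level.  Digesting those two
     * 4 KB chunks yields 64 bytes, which fit in a single zero-padded 4 KB top page, so the tree
     * has two levels and the root hash is the salted digest of that top page.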
     */
    public ByteBuffer generateVerityTree(DataSource fileSource) throws IOException {
        int digestSize = mMd.getDigestLength();

        // Calculate the summed area table of level sizes.  In other words, this is the offset
        // table of each level, plus the next non-existing level.
        int[] levelOffset = calculateLevelOffset(fileSource.size(), digestSize);

        ByteBuffer verityBuffer = ByteBuffer.allocate(levelOffset[levelOffset.length - 1]);

        // Generate the hash tree bottom-up.
        for (int i = levelOffset.length - 2; i >= 0; i--) {
            DataSink middleBufferSink = new ByteBufferSink(
                    slice(verityBuffer, levelOffset[i], levelOffset[i + 1]));
            DataSource src;
            if (i == levelOffset.length - 2) {
                src = fileSource;
                digestDataByChunks(src, middleBufferSink);
            } else {
                src = DataSources.asDataSource(slice(verityBuffer.asReadOnlyBuffer(),
                        levelOffset[i + 1], levelOffset[i + 2]));
                digestDataByChunks(src, middleBufferSink);
            }

            // If the output does not fill a whole chunk, pad it with 0s.
            long totalOutput = divideRoundup(src.size(), CHUNK_SIZE) * digestSize;
            int incomplete = (int) (totalOutput % CHUNK_SIZE);
            if (incomplete > 0) {
                byte[] padding = new byte[CHUNK_SIZE - incomplete];
                middleBufferSink.consume(padding, 0, padding.length);
            }
        }
        return verityBuffer;
    }

    /**
     * Returns the digested root hash from the top level (only page) of a verity tree.
     */
    public byte[] getRootHashFromTree(ByteBuffer verityBuffer) throws IOException {
        ByteBuffer firstPage = slice(verityBuffer.asReadOnlyBuffer(), 0, CHUNK_SIZE);
        return saltedDigest(firstPage);
    }

    /**
     * Returns a summed area table of the level sizes in the verity tree.  In other words, the
     * returned array holds the offset of each level in the verity tree file format, plus an
     * additional offset for the next, non-existing level (i.e. end of the last level + 1).  Thus
     * the array size is the number of levels + 1.
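     *
     * <p>Worked example (illustrative, assuming a 32-byte digest): a 5 MB input has 1280 chunks,
     * whose digests occupy 40960 bytes (ten full chunks); those in turn digest into 320 bytes,
     * which fit in one chunk.  The result is {0, 4096, 45056}: the top level starts at offset 0,
     * the bottom level at 4096, and the tree ends at 45056.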
     */
    private static int[] calculateLevelOffset(long dataSize, int digestSize) {
        // Compute the total size of each level, bottom to top.
        ArrayList<Long> levelSize = new ArrayList<>();
        while (true) {
            long chunkCount = divideRoundup(dataSize, CHUNK_SIZE);
            long size = CHUNK_SIZE * divideRoundup(chunkCount * digestSize, CHUNK_SIZE);
            levelSize.add(size);
            if (chunkCount * digestSize <= CHUNK_SIZE) {
                break;
            }
            dataSize = chunkCount * digestSize;
        }

        // Reverse and convert to a summed area table.
        int[] levelOffset = new int[levelSize.size() + 1];
        levelOffset[0] = 0;
        for (int i = 0; i < levelSize.size(); i++) {
            // Verity trees larger than Integer.MAX_VALUE are not supported.
            levelOffset[i + 1] = levelOffset[i] + Math.toIntExact(
                    levelSize.get(levelSize.size() - i - 1));
        }
        return levelOffset;
    }

    /**
     * Digests the data source chunk by chunk and feeds the digests to the sink one by one.  If
     * the last unit is less than the chunk size and padding is desired, the chunk is filled up
     * with extra 0 padding before digesting.
     */
    private void digestDataByChunks(DataSource dataSource, DataSink dataSink) throws IOException {
        final long size = dataSource.size();
        final int chunks = (int) divideRoundup(size, CHUNK_SIZE);

        /** Single IO operation size, in chunks. */
        final int ioSizeChunks = MAX_PREFETCH_CHUNKS;

        final byte[][] hashes = new byte[chunks][];

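        // The Phaser starts with one registered party (this thread).  Each task is registered
        // before being submitted and deregisters itself on completion, so the
        // arriveAndAwaitAdvance() call below blocks until all digest tasks have finished.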
        Phaser tasks = new Phaser(1);

        // Read the input file as fast as we can.
        final long maxReadSize = ioSizeChunks * CHUNK_SIZE;

        long readOffset = 0;
        int startChunkIndex = 0;
        while (readOffset < size) {
            final long readLimit = Math.min(readOffset + maxReadSize, size);
            final int readSize = (int) (readLimit - readOffset);
            final int bufferSizeChunks = (int) divideRoundup(readSize, CHUNK_SIZE);

            // Overallocating to zero-pad the last chunk.
            // With a 4 MiB block size, 32 threads and a queue size of 4 we might allocate up to
            // 144 MiB.
            final ByteBuffer buffer = ByteBuffer.allocate(bufferSizeChunks * CHUNK_SIZE);
            dataSource.copyTo(readOffset, readSize, buffer);
            buffer.rewind();

            final int readChunkIndex = startChunkIndex;
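            // Each task needs its own MessageDigest (instances are not thread-safe); it digests
            // its prefetched buffer chunk by chunk, writing into non-overlapping slots of the
            // shared hashes array.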
            Runnable task = () -> {
                final MessageDigest md = cloneMessageDigest();
                for (int offset = 0, finish = buffer.capacity(), chunkIndex = readChunkIndex;
                        offset < finish; offset += CHUNK_SIZE, ++chunkIndex) {
                    ByteBuffer chunk = slice(buffer, offset, offset + CHUNK_SIZE);
                    hashes[chunkIndex] = saltedDigest(md, chunk);
                }
                tasks.arriveAndDeregister();
            };
            tasks.register();
            mExecutor.execute(task);

            startChunkIndex += bufferSizeChunks;
            readOffset += readSize;
        }

        // Wait for the tasks to complete.
        tasks.arriveAndAwaitAdvance();

        // Stream the hashes back.
        for (byte[] hash : hashes) {
            dataSink.consume(hash, 0, hash.length);
        }
    }

    /** Returns the digest of data with salt prepended. */
    private byte[] saltedDigest(ByteBuffer data) {
        return saltedDigest(mMd, data);
    }

    private byte[] saltedDigest(MessageDigest md, ByteBuffer data) {
        md.reset();
        if (mSalt != null) {
            md.update(mSalt);
        }
        md.update(data);
        return md.digest();
    }

    /** Divides a number and rounds up to the closest integer. */
    private static long divideRoundup(long dividend, long divisor) {
        return (dividend + divisor - 1) / divisor;
    }

    /** Returns a slice of the buffer which shares the underlying content. */
    private static ByteBuffer slice(ByteBuffer buffer, int begin, int end) {
        ByteBuffer b = buffer.duplicate();
        b.position(0);  // to ensure the position <= limit invariant.
        b.limit(end);
        b.position(begin);
        return b.slice();
    }

    /**
     * Obtains a new instance of the message digest algorithm.
     */
    private static MessageDigest getNewMessageDigest() throws NoSuchAlgorithmException {
        return MessageDigest.getInstance(JCA_ALGORITHM);
    }

    /**
     * Clones the existing message digest, or creates a new instance if cloning is unavailable.
     */
    private MessageDigest cloneMessageDigest() {
        try {
            return (MessageDigest) mMd.clone();
        } catch (CloneNotSupportedException ignored) {
            try {
                return getNewMessageDigest();
            } catch (NoSuchAlgorithmException e) {
                throw new IllegalStateException(
                        "Failed to obtain an instance of a previously available message digest", e);
            }
        }
    }
}