• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2010 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 // This is an on-disk cache which maps a 64-bits key to a byte array.
18 //
19 // It consists of three files: one index file and two data files. One of the
20 // data files is "active", and the other is "inactive". New entries are
21 // appended into the active region until it reaches the size limit. At that
22 // point the active file and the inactive file are swapped, and the new active
23 // file is truncated to empty (and the index for that file is also cleared).
24 // The index is a hash table with linear probing. When the load factor reaches
25 // 0.5, it does the same thing like when the size limit is reached.
26 //
27 // The index file format: (all numbers are stored in little-endian)
28 // [0]  Magic number: 0xB3273030
29 // [4]  MaxEntries: Max number of hash entries per region.
30 // [8]  MaxBytes: Max number of data bytes per region (including header).
31 // [12] ActiveRegion: The active growing region: 0 or 1.
32 // [16] ActiveEntries: The number of hash entries used in the active region.
33 // [20] ActiveBytes: The number of data bytes used in the active region.
34 // [24] Version number.
35 // [28] Checksum of [0..28).
36 // [32] Hash entries for region 0. The size is X = (12 * MaxEntries bytes).
37 // [32 + X] Hash entries for region 1. The size is also X.
38 //
39 // Each hash entry is 12 bytes: 8 bytes key and 4 bytes offset into the data
40 // file. The offset is 0 when the slot is free. Note that 0 is a valid value
41 // for key. The keys are used directly as index into a hash table, so they
42 // should be suitably distributed.
43 //
44 // Each data file stores data for one region. The data file is concatenated
45 // blobs followed by the magic number 0xBD248510.
46 //
47 // The blob format:
48 // [0]  Key of this blob
49 // [8]  Checksum of this blob
50 // [12] Offset of this blob
51 // [16] Length of this blob (not including header)
52 // [20] Blob
53 //
54 // Below are the interface for BlobCache. The instance of this class does not
55 // support concurrent use by multiple threads.
56 //
57 // public BlobCache(String path, int maxEntries, int maxBytes, boolean reset) throws IOException;
58 // public void insert(long key, byte[] data) throws IOException;
59 // public byte[] lookup(long key) throws IOException;
60 // public void lookup(LookupRequest req) throws IOException;
61 // public void close();
62 // public void syncIndex();
63 // public void syncAll();
64 // public static void deleteFiles(String path);
65 //
66 package com.android.gallery3d.common;
67 
68 import android.util.Log;
69 
70 import java.io.Closeable;
71 import java.io.File;
72 import java.io.IOException;
73 import java.io.RandomAccessFile;
74 import java.nio.ByteOrder;
75 import java.nio.MappedByteBuffer;
76 import java.nio.channels.FileChannel;
77 import java.util.Arrays;
78 import java.util.zip.Adler32;
79 
80 public class BlobCache implements Closeable {
81     private static final String TAG = "BlobCache";
82 
83     private static final int MAGIC_INDEX_FILE = 0xB3273030;
84     private static final int MAGIC_DATA_FILE = 0xBD248510;
85 
86     // index header offset
87     private static final int IH_MAGIC = 0;
88     private static final int IH_MAX_ENTRIES = 4;
89     private static final int IH_MAX_BYTES = 8;
90     private static final int IH_ACTIVE_REGION = 12;
91     private static final int IH_ACTIVE_ENTRIES = 16;
92     private static final int IH_ACTIVE_BYTES = 20;
93     private static final int IH_VERSION = 24;
94     private static final int IH_CHECKSUM = 28;
95     private static final int INDEX_HEADER_SIZE = 32;
96 
97     private static final int DATA_HEADER_SIZE = 4;
98 
99     // blob header offset
100     private static final int BH_KEY = 0;
101     private static final int BH_CHECKSUM = 8;
102     private static final int BH_OFFSET = 12;
103     private static final int BH_LENGTH = 16;
104     private static final int BLOB_HEADER_SIZE = 20;
105 
106     private RandomAccessFile mIndexFile;
107     private RandomAccessFile mDataFile0;
108     private RandomAccessFile mDataFile1;
109     private FileChannel mIndexChannel;
110     private MappedByteBuffer mIndexBuffer;
111 
112     private int mMaxEntries;
113     private int mMaxBytes;
114     private int mActiveRegion;
115     private int mActiveEntries;
116     private int mActiveBytes;
117     private int mVersion;
118 
119     private RandomAccessFile mActiveDataFile;
120     private RandomAccessFile mInactiveDataFile;
121     private int mActiveHashStart;
122     private int mInactiveHashStart;
123     private byte[] mIndexHeader = new byte[INDEX_HEADER_SIZE];
124     private byte[] mBlobHeader = new byte[BLOB_HEADER_SIZE];
125     private Adler32 mAdler32 = new Adler32();
126 
127     // Creates the cache. Three files will be created:
128     // path + ".idx", path + ".0", and path + ".1"
129     // The ".0" file and the ".1" file each stores data for a region. Each of
130     // them can grow to the size specified by maxBytes. The maxEntries parameter
131     // specifies the maximum number of entries each region can have. If the
132     // "reset" parameter is true, the cache will be cleared before use.
BlobCache(String path, int maxEntries, int maxBytes, boolean reset)133     public BlobCache(String path, int maxEntries, int maxBytes, boolean reset)
134             throws IOException {
135         this(path, maxEntries, maxBytes, reset, 0);
136     }
137 
BlobCache(String path, int maxEntries, int maxBytes, boolean reset, int version)138     public BlobCache(String path, int maxEntries, int maxBytes, boolean reset,
139             int version) throws IOException {
140         mIndexFile = new RandomAccessFile(path + ".idx", "rw");
141         mDataFile0 = new RandomAccessFile(path + ".0", "rw");
142         mDataFile1 = new RandomAccessFile(path + ".1", "rw");
143         mVersion = version;
144 
145         if (!reset && loadIndex()) {
146             return;
147         }
148 
149         resetCache(maxEntries, maxBytes);
150 
151         if (!loadIndex()) {
152             closeAll();
153             throw new IOException("unable to load index");
154         }
155     }
156 
157     // Delete the files associated with the given path previously created
158     // by the BlobCache constructor.
deleteFiles(String path)159     public static void deleteFiles(String path) {
160         deleteFileSilently(path + ".idx");
161         deleteFileSilently(path + ".0");
162         deleteFileSilently(path + ".1");
163     }
164 
deleteFileSilently(String path)165     private static void deleteFileSilently(String path) {
166         try {
167             new File(path).delete();
168         } catch (Throwable t) {
169             // ignore;
170         }
171     }
172 
173     // Close the cache. All resources are released. No other method should be
174     // called after this is called.
175     @Override
close()176     public void close() {
177         syncAll();
178         closeAll();
179     }
180 
closeAll()181     private void closeAll() {
182         closeSilently(mIndexChannel);
183         closeSilently(mIndexFile);
184         closeSilently(mDataFile0);
185         closeSilently(mDataFile1);
186     }
187 
188     // Returns true if loading index is successful. After this method is called,
189     // mIndexHeader and index header in file should be kept sync.
loadIndex()190     private boolean loadIndex() {
191         try {
192             mIndexFile.seek(0);
193             mDataFile0.seek(0);
194             mDataFile1.seek(0);
195 
196             byte[] buf = mIndexHeader;
197             if (mIndexFile.read(buf) != INDEX_HEADER_SIZE) {
198                 Log.w(TAG, "cannot read header");
199                 return false;
200             }
201 
202             if (readInt(buf, IH_MAGIC) != MAGIC_INDEX_FILE) {
203                 Log.w(TAG, "cannot read header magic");
204                 return false;
205             }
206 
207             if (readInt(buf, IH_VERSION) != mVersion) {
208                 Log.w(TAG, "version mismatch");
209                 return false;
210             }
211 
212             mMaxEntries = readInt(buf, IH_MAX_ENTRIES);
213             mMaxBytes = readInt(buf, IH_MAX_BYTES);
214             mActiveRegion = readInt(buf, IH_ACTIVE_REGION);
215             mActiveEntries = readInt(buf, IH_ACTIVE_ENTRIES);
216             mActiveBytes = readInt(buf, IH_ACTIVE_BYTES);
217 
218             int sum = readInt(buf, IH_CHECKSUM);
219             if (checkSum(buf, 0, IH_CHECKSUM) != sum) {
220                 Log.w(TAG, "header checksum does not match");
221                 return false;
222             }
223 
224             // Sanity check
225             if (mMaxEntries <= 0) {
226                 Log.w(TAG, "invalid max entries");
227                 return false;
228             }
229             if (mMaxBytes <= 0) {
230                 Log.w(TAG, "invalid max bytes");
231                 return false;
232             }
233             if (mActiveRegion != 0 && mActiveRegion != 1) {
234                 Log.w(TAG, "invalid active region");
235                 return false;
236             }
237             if (mActiveEntries < 0 || mActiveEntries > mMaxEntries) {
238                 Log.w(TAG, "invalid active entries");
239                 return false;
240             }
241             if (mActiveBytes < DATA_HEADER_SIZE || mActiveBytes > mMaxBytes) {
242                 Log.w(TAG, "invalid active bytes");
243                 return false;
244             }
245             if (mIndexFile.length() !=
246                     INDEX_HEADER_SIZE + mMaxEntries * 12 * 2) {
247                 Log.w(TAG, "invalid index file length");
248                 return false;
249             }
250 
251             // Make sure data file has magic
252             byte[] magic = new byte[4];
253             if (mDataFile0.read(magic) != 4) {
254                 Log.w(TAG, "cannot read data file magic");
255                 return false;
256             }
257             if (readInt(magic, 0) != MAGIC_DATA_FILE) {
258                 Log.w(TAG, "invalid data file magic");
259                 return false;
260             }
261             if (mDataFile1.read(magic) != 4) {
262                 Log.w(TAG, "cannot read data file magic");
263                 return false;
264             }
265             if (readInt(magic, 0) != MAGIC_DATA_FILE) {
266                 Log.w(TAG, "invalid data file magic");
267                 return false;
268             }
269 
270             // Map index file to memory
271             mIndexChannel = mIndexFile.getChannel();
272             mIndexBuffer = mIndexChannel.map(FileChannel.MapMode.READ_WRITE,
273                     0, mIndexFile.length());
274             mIndexBuffer.order(ByteOrder.LITTLE_ENDIAN);
275 
276             setActiveVariables();
277             return true;
278         } catch (IOException ex) {
279             Log.e(TAG, "loadIndex failed.", ex);
280             return false;
281         }
282     }
283 
setActiveVariables()284     private void setActiveVariables() throws IOException {
285         mActiveDataFile = (mActiveRegion == 0) ? mDataFile0 : mDataFile1;
286         mInactiveDataFile = (mActiveRegion == 1) ? mDataFile0 : mDataFile1;
287         mActiveDataFile.setLength(mActiveBytes);
288         mActiveDataFile.seek(mActiveBytes);
289 
290         mActiveHashStart = INDEX_HEADER_SIZE;
291         mInactiveHashStart = INDEX_HEADER_SIZE;
292 
293         if (mActiveRegion == 0) {
294             mInactiveHashStart += mMaxEntries * 12;
295         } else {
296             mActiveHashStart += mMaxEntries * 12;
297         }
298     }
299 
resetCache(int maxEntries, int maxBytes)300     private void resetCache(int maxEntries, int maxBytes) throws IOException {
301         mIndexFile.setLength(0);  // truncate to zero the index
302         mIndexFile.setLength(INDEX_HEADER_SIZE + maxEntries * 12 * 2);
303         mIndexFile.seek(0);
304         byte[] buf = mIndexHeader;
305         writeInt(buf, IH_MAGIC, MAGIC_INDEX_FILE);
306         writeInt(buf, IH_MAX_ENTRIES, maxEntries);
307         writeInt(buf, IH_MAX_BYTES, maxBytes);
308         writeInt(buf, IH_ACTIVE_REGION, 0);
309         writeInt(buf, IH_ACTIVE_ENTRIES, 0);
310         writeInt(buf, IH_ACTIVE_BYTES, DATA_HEADER_SIZE);
311         writeInt(buf, IH_VERSION, mVersion);
312         writeInt(buf, IH_CHECKSUM, checkSum(buf, 0, IH_CHECKSUM));
313         mIndexFile.write(buf);
314         // This is only needed if setLength does not zero the extended part.
315         // writeZero(mIndexFile, maxEntries * 12 * 2);
316 
317         mDataFile0.setLength(0);
318         mDataFile1.setLength(0);
319         mDataFile0.seek(0);
320         mDataFile1.seek(0);
321         writeInt(buf, 0, MAGIC_DATA_FILE);
322         mDataFile0.write(buf, 0, 4);
323         mDataFile1.write(buf, 0, 4);
324     }
325 
326     // Flip the active region and the inactive region.
flipRegion()327     private void flipRegion() throws IOException {
328         mActiveRegion = 1 - mActiveRegion;
329         mActiveEntries = 0;
330         mActiveBytes = DATA_HEADER_SIZE;
331 
332         writeInt(mIndexHeader, IH_ACTIVE_REGION, mActiveRegion);
333         writeInt(mIndexHeader, IH_ACTIVE_ENTRIES, mActiveEntries);
334         writeInt(mIndexHeader, IH_ACTIVE_BYTES, mActiveBytes);
335         updateIndexHeader();
336 
337         setActiveVariables();
338         clearHash(mActiveHashStart);
339         syncIndex();
340     }
341 
342     // Sync mIndexHeader to the index file.
updateIndexHeader()343     private void updateIndexHeader() {
344         writeInt(mIndexHeader, IH_CHECKSUM,
345                 checkSum(mIndexHeader, 0, IH_CHECKSUM));
346         mIndexBuffer.position(0);
347         mIndexBuffer.put(mIndexHeader);
348     }
349 
350     // Clear the hash table starting from the specified offset.
clearHash(int hashStart)351     private void clearHash(int hashStart) {
352         byte[] zero = new byte[1024];
353         mIndexBuffer.position(hashStart);
354         for (int count = mMaxEntries * 12; count > 0;) {
355             int todo = Math.min(count, 1024);
356             mIndexBuffer.put(zero, 0, todo);
357             count -= todo;
358         }
359     }
360 
361     // Inserts a (key, data) pair into the cache.
insert(long key, byte[] data)362     public void insert(long key, byte[] data) throws IOException {
363         if (DATA_HEADER_SIZE + BLOB_HEADER_SIZE + data.length > mMaxBytes) {
364             throw new RuntimeException("blob is too large!");
365         }
366 
367         if (mActiveBytes + BLOB_HEADER_SIZE + data.length > mMaxBytes
368                 || mActiveEntries * 2 >= mMaxEntries) {
369             flipRegion();
370         }
371 
372         if (!lookupInternal(key, mActiveHashStart)) {
373             // If we don't have an existing entry with the same key, increase
374             // the entry count.
375             mActiveEntries++;
376             writeInt(mIndexHeader, IH_ACTIVE_ENTRIES, mActiveEntries);
377         }
378 
379         insertInternal(key, data, data.length);
380         updateIndexHeader();
381     }
382 
clearEntry(long key)383     public void clearEntry(long key) throws IOException {
384         if (!lookupInternal(key, mActiveHashStart)) {
385             return; // Nothing to clear
386         }
387         byte[] header = mBlobHeader;
388         Arrays.fill(header, (byte) 0);
389         mActiveDataFile.seek(mFileOffset);
390         mActiveDataFile.write(header);
391     }
392 
393     // Appends the data to the active file. It also updates the hash entry.
394     // The proper hash entry (suitable for insertion or replacement) must be
395     // pointed by mSlotOffset.
insertInternal(long key, byte[] data, int length)396     private void insertInternal(long key, byte[] data, int length)
397             throws IOException {
398         byte[] header = mBlobHeader;
399         int sum = checkSum(data);
400         writeLong(header, BH_KEY, key);
401         writeInt(header, BH_CHECKSUM, sum);
402         writeInt(header, BH_OFFSET, mActiveBytes);
403         writeInt(header, BH_LENGTH, length);
404         mActiveDataFile.write(header);
405         mActiveDataFile.write(data, 0, length);
406 
407         mIndexBuffer.putLong(mSlotOffset, key);
408         mIndexBuffer.putInt(mSlotOffset + 8, mActiveBytes);
409         mActiveBytes += BLOB_HEADER_SIZE + length;
410         writeInt(mIndexHeader, IH_ACTIVE_BYTES, mActiveBytes);
411     }
412 
413     public static class LookupRequest {
414         public long key;        // input: the key to find
415         public byte[] buffer;   // input/output: the buffer to store the blob
416         public int length;      // output: the length of the blob
417     }
418 
419     // This method is for one-off lookup. For repeated lookup, use the version
420     // accepting LookupRequest to avoid repeated memory allocation.
421     private LookupRequest mLookupRequest = new LookupRequest();
lookup(long key)422     public byte[] lookup(long key) throws IOException {
423         mLookupRequest.key = key;
424         mLookupRequest.buffer = null;
425         if (lookup(mLookupRequest)) {
426             return mLookupRequest.buffer;
427         } else {
428             return null;
429         }
430     }
431 
432     // Returns true if the associated blob for the given key is available.
433     // The blob is stored in the buffer pointed by req.buffer, and the length
434     // is in stored in the req.length variable.
435     //
436     // The user can input a non-null value in req.buffer, and this method will
437     // try to use that buffer. If that buffer is not large enough, this method
438     // will allocate a new buffer and assign it to req.buffer.
439     //
440     // This method tries not to throw IOException even if the data file is
441     // corrupted, but it can still throw IOException if things get strange.
lookup(LookupRequest req)442     public boolean lookup(LookupRequest req) throws IOException {
443         // Look up in the active region first.
444         if (lookupInternal(req.key, mActiveHashStart)) {
445             if (getBlob(mActiveDataFile, mFileOffset, req)) {
446                 return true;
447             }
448         }
449 
450         // We want to copy the data from the inactive file to the active file
451         // if it's available. So we keep the offset of the hash entry so we can
452         // avoid looking it up again.
453         int insertOffset = mSlotOffset;
454 
455         // Look up in the inactive region.
456         if (lookupInternal(req.key, mInactiveHashStart)) {
457             if (getBlob(mInactiveDataFile, mFileOffset, req)) {
458                 // If we don't have enough space to insert this blob into
459                 // the active file, just return it.
460                 if (mActiveBytes + BLOB_HEADER_SIZE + req.length > mMaxBytes
461                     || mActiveEntries * 2 >= mMaxEntries) {
462                     return true;
463                 }
464                 // Otherwise copy it over.
465                 mSlotOffset = insertOffset;
466                 try {
467                     insertInternal(req.key, req.buffer, req.length);
468                     mActiveEntries++;
469                     writeInt(mIndexHeader, IH_ACTIVE_ENTRIES, mActiveEntries);
470                     updateIndexHeader();
471                 } catch (Throwable t) {
472                     Log.e(TAG, "cannot copy over");
473                 }
474                 return true;
475             }
476         }
477 
478         return false;
479     }
480 
481 
482     // Copies the blob for the specified offset in the specified file to
483     // req.buffer. If req.buffer is null or too small, allocate a buffer and
484     // assign it to req.buffer.
485     // Returns false if the blob is not available (either the index file is
486     // not sync with the data file, or one of them is corrupted). The length
487     // of the blob is stored in the req.length variable.
getBlob(RandomAccessFile file, int offset, LookupRequest req)488     private boolean getBlob(RandomAccessFile file, int offset,
489             LookupRequest req) throws IOException {
490         byte[] header = mBlobHeader;
491         long oldPosition = file.getFilePointer();
492         try {
493             file.seek(offset);
494             if (file.read(header) != BLOB_HEADER_SIZE) {
495                 Log.w(TAG, "cannot read blob header");
496                 return false;
497             }
498             long blobKey = readLong(header, BH_KEY);
499             if (blobKey == 0) {
500                 return false; // This entry has been cleared.
501             }
502             if (blobKey != req.key) {
503                 Log.w(TAG, "blob key does not match: " + blobKey);
504                 return false;
505             }
506             int sum = readInt(header, BH_CHECKSUM);
507             int blobOffset = readInt(header, BH_OFFSET);
508             if (blobOffset != offset) {
509                 Log.w(TAG, "blob offset does not match: " + blobOffset);
510                 return false;
511             }
512             int length = readInt(header, BH_LENGTH);
513             if (length < 0 || length > mMaxBytes - offset - BLOB_HEADER_SIZE) {
514                 Log.w(TAG, "invalid blob length: " + length);
515                 return false;
516             }
517             if (req.buffer == null || req.buffer.length < length) {
518                 req.buffer = new byte[length];
519             }
520 
521             byte[] blob = req.buffer;
522             req.length = length;
523 
524             if (file.read(blob, 0, length) != length) {
525                 Log.w(TAG, "cannot read blob data");
526                 return false;
527             }
528             if (checkSum(blob, 0, length) != sum) {
529                 Log.w(TAG, "blob checksum does not match: " + sum);
530                 return false;
531             }
532             return true;
533         } catch (Throwable t)  {
534             Log.e(TAG, "getBlob failed.", t);
535             return false;
536         } finally {
537             file.seek(oldPosition);
538         }
539     }
540 
541     // Tries to look up a key in the specified hash region.
542     // Returns true if the lookup is successful.
543     // The slot offset in the index file is saved in mSlotOffset. If the lookup
544     // is successful, it's the slot found. Otherwise it's the slot suitable for
545     // insertion.
546     // If the lookup is successful, the file offset is also saved in
547     // mFileOffset.
548     private int mSlotOffset;
549     private int mFileOffset;
lookupInternal(long key, int hashStart)550     private boolean lookupInternal(long key, int hashStart) {
551         int slot = (int) (key % mMaxEntries);
552         if (slot < 0) slot += mMaxEntries;
553         int slotBegin = slot;
554         while (true) {
555             int offset = hashStart + slot * 12;
556             long candidateKey = mIndexBuffer.getLong(offset);
557             int candidateOffset = mIndexBuffer.getInt(offset + 8);
558             if (candidateOffset == 0) {
559                 mSlotOffset = offset;
560                 return false;
561             } else if (candidateKey == key) {
562                 mSlotOffset = offset;
563                 mFileOffset = candidateOffset;
564                 return true;
565             } else {
566                 if (++slot >= mMaxEntries) {
567                     slot = 0;
568                 }
569                 if (slot == slotBegin) {
570                     Log.w(TAG, "corrupted index: clear the slot.");
571                     mIndexBuffer.putInt(hashStart + slot * 12 + 8, 0);
572                 }
573             }
574         }
575     }
576 
syncIndex()577     public void syncIndex() {
578         try {
579             mIndexBuffer.force();
580         } catch (Throwable t) {
581             Log.w(TAG, "sync index failed", t);
582         }
583     }
584 
syncAll()585     public void syncAll() {
586         syncIndex();
587         try {
588             mDataFile0.getFD().sync();
589         } catch (Throwable t) {
590             Log.w(TAG, "sync data file 0 failed", t);
591         }
592         try {
593             mDataFile1.getFD().sync();
594         } catch (Throwable t) {
595             Log.w(TAG, "sync data file 1 failed", t);
596         }
597     }
598 
599     // This is for testing only.
600     //
601     // Returns the active count (mActiveEntries). This also verifies that
602     // the active count matches matches what's inside the hash region.
getActiveCount()603     int getActiveCount() {
604         int count = 0;
605         for (int i = 0; i < mMaxEntries; i++) {
606             int offset = mActiveHashStart + i * 12;
607             long candidateKey = mIndexBuffer.getLong(offset);
608             int candidateOffset = mIndexBuffer.getInt(offset + 8);
609             if (candidateOffset != 0) ++count;
610         }
611         if (count == mActiveEntries) {
612             return count;
613         } else {
614             Log.e(TAG, "wrong active count: " + mActiveEntries + " vs " + count);
615             return -1;  // signal failure.
616         }
617     }
618 
checkSum(byte[] data)619     int checkSum(byte[] data) {
620         mAdler32.reset();
621         mAdler32.update(data);
622         return (int) mAdler32.getValue();
623     }
624 
checkSum(byte[] data, int offset, int nbytes)625     int checkSum(byte[] data, int offset, int nbytes) {
626         mAdler32.reset();
627         mAdler32.update(data, offset, nbytes);
628         return (int) mAdler32.getValue();
629     }
630 
closeSilently(Closeable c)631     static void closeSilently(Closeable c) {
632         if (c == null) return;
633         try {
634             c.close();
635         } catch (Throwable t) {
636             // do nothing
637         }
638     }
639 
readInt(byte[] buf, int offset)640     static int readInt(byte[] buf, int offset) {
641         return (buf[offset] & 0xff)
642                 | ((buf[offset + 1] & 0xff) << 8)
643                 | ((buf[offset + 2] & 0xff) << 16)
644                 | ((buf[offset + 3] & 0xff) << 24);
645     }
646 
readLong(byte[] buf, int offset)647     static long readLong(byte[] buf, int offset) {
648         long result = buf[offset + 7] & 0xff;
649         for (int i = 6; i >= 0; i--) {
650             result = (result << 8) | (buf[offset + i] & 0xff);
651         }
652         return result;
653     }
654 
writeInt(byte[] buf, int offset, int value)655     static void writeInt(byte[] buf, int offset, int value) {
656         for (int i = 0; i < 4; i++) {
657             buf[offset + i] = (byte) (value & 0xff);
658             value >>= 8;
659         }
660     }
661 
writeLong(byte[] buf, int offset, long value)662     static void writeLong(byte[] buf, int offset, long value) {
663         for (int i = 0; i < 8; i++) {
664             buf[offset + i] = (byte) (value & 0xff);
665             value >>= 8;
666         }
667     }
668 }
669