1 /*
2 * Copyright (c) 2017-present, Facebook, Inc.
3 * All rights reserved.
4 *
5 * This source code is licensed under both the BSD-style license (found in the
6 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7 * in the COPYING file in the root directory of this source tree).
8 * You may select, at your option, one of the above-listed licenses.
9 */
10
11 /* *********************************************************
12 * Turn on Large Files support (>4GB) for 32-bit Linux/Unix
13 ***********************************************************/
14 #if !defined(__64BIT__) || defined(__MINGW32__) /* No point defining Large file for 64 bit but MinGW-w64 requires it */
15 # if !defined(_FILE_OFFSET_BITS)
16 # define _FILE_OFFSET_BITS 64 /* turn off_t into a 64-bit type for ftello, fseeko */
17 # endif
18 # if !defined(_LARGEFILE_SOURCE) /* obsolete macro, replaced with _FILE_OFFSET_BITS */
19 # define _LARGEFILE_SOURCE 1 /* Large File Support extension (LFS) - fseeko, ftello */
20 # endif
21 # if defined(_AIX) || defined(__hpux)
22 # define _LARGE_FILES /* Large file support on 32-bits AIX and HP-UX */
23 # endif
24 #endif
25
26 /* ************************************************************
27 * Avoid fseek()'s 2GiB barrier with MSVC, macOS, *BSD, MinGW
28 ***************************************************************/
29 #if defined(_MSC_VER) && _MSC_VER >= 1400
30 # define LONG_SEEK _fseeki64
31 #elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */
32 # define LONG_SEEK fseeko
33 #elif defined(__MINGW32__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) && defined(__MSVCRT__)
34 # define LONG_SEEK fseeko64
35 #elif defined(_WIN32) && !defined(__DJGPP__)
36 # include <windows.h>
/* 64-bit seek for Windows CRT streams: translate the stdio origin into the
 * Win32 move method and reposition the underlying OS handle directly,
 * bypassing fseek()'s 2GiB limit.
 * @return 0 on success, -1 on failure */
static int LONG_SEEK(FILE* file, __int64 offset, int origin) {
    LARGE_INTEGER winOffset;
    DWORD moveMethod;
    winOffset.QuadPart = offset;

    switch (origin) {
        case SEEK_END: moveMethod = FILE_END;     break;
        case SEEK_CUR: moveMethod = FILE_CURRENT; break;
        default:       moveMethod = FILE_BEGIN;   break;
    }

    return SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)),
                            winOffset, NULL, moveMethod)
            ? 0 : -1;
}
53 #else
54 # define LONG_SEEK fseek
55 #endif
56
57 #include <stdlib.h> /* malloc, free */
58 #include <stdio.h> /* FILE* */
#include <limits.h>    /* UINT_MAX */
60 #include <assert.h>
61
62 #define XXH_STATIC_LINKING_ONLY
63 #include "xxhash.h"
64
65 #define ZSTD_STATIC_LINKING_ONLY
66 #include "zstd.h"
67 #include "zstd_errors.h"
68 #include "mem.h"
69 #include "zstd_seekable.h"
70
71 #undef ERROR
72 #define ERROR(name) ((size_t)-ZSTD_error_##name)
73
74 #define CHECK_IO(f) { int const errcod = (f); if (errcod < 0) return ERROR(seekableIO); }
75
76 #undef MIN
77 #undef MAX
78 #define MIN(a, b) ((a) < (b) ? (a) : (b))
79 #define MAX(a, b) ((a) > (b) ? (a) : (b))
80
81 #define ZSTD_SEEKABLE_NO_OUTPUT_PROGRESS_MAX 16
82
83 /* Special-case callbacks for FILE* and in-memory modes, so that we can treat
84 * them the same way as the advanced API */
/* read callback for FILE*-backed sources: pull exactly `n` bytes into
 * `buffer`.
 * @return 0 on success, -1 if fewer than `n` bytes could be read */
static int ZSTD_seekable_read_FILE(void* opaque, void* buffer, size_t n)
{
    FILE* const f = (FILE*)opaque;
    size_t const nbRead = fread(buffer, 1, n, f);
    return (nbRead == n) ? 0 : -1;
}
93
/* seek callback for FILE*-backed sources; flushes the stream so subsequent
 * reads observe the new position.
 * @return 0 on success, non-zero on failure */
static int ZSTD_seekable_seek_FILE(void* opaque, long long offset, int origin)
{
    FILE* const f = (FILE*)opaque;
    {   int const seekStatus = LONG_SEEK(f, offset, origin);
        if (seekStatus != 0) return seekStatus;
    }
    return fflush(f);
}
100
/* Cursor over an in-memory source buffer; used as the `opaque` state for
 * the in-memory read/seek callbacks below. */
typedef struct {
    const void *ptr;  /* start of the source buffer */
    size_t size;      /* total buffer size in bytes */
    size_t pos;       /* current read position; kept <= size by the callbacks */
} buffWrapper_t;
106
ZSTD_seekable_read_buff(void * opaque,void * buffer,size_t n)107 static int ZSTD_seekable_read_buff(void* opaque, void* buffer, size_t n)
108 {
109 buffWrapper_t* const buff = (buffWrapper_t*)opaque;
110 assert(buff != NULL);
111 if (buff->pos + n > buff->size) return -1;
112 memcpy(buffer, (const BYTE*)buff->ptr + buff->pos, n);
113 buff->pos += n;
114 return 0;
115 }
116
ZSTD_seekable_seek_buff(void * opaque,long long offset,int origin)117 static int ZSTD_seekable_seek_buff(void* opaque, long long offset, int origin)
118 {
119 buffWrapper_t* const buff = (buffWrapper_t*) opaque;
120 unsigned long long newOffset;
121 assert(buff != NULL);
122 switch (origin) {
123 case SEEK_SET:
124 assert(offset >= 0);
125 newOffset = (unsigned long long)offset;
126 break;
127 case SEEK_CUR:
128 newOffset = (unsigned long long)((long long)buff->pos + offset);
129 break;
130 case SEEK_END:
131 newOffset = (unsigned long long)((long long)buff->size + offset);
132 break;
133 default:
134 assert(0); /* not possible */
135 }
136 if (newOffset > buff->size) {
137 return -1;
138 }
139 buff->pos = newOffset;
140 return 0;
141 }
142
/* One seek-table entry: cumulative offsets marking where a frame starts,
 * plus its optional per-frame checksum. */
typedef struct {
    U64 cOffset;   /* cumulative compressed offset of the frame's start */
    U64 dOffset;   /* cumulative decompressed offset of the frame's start */
    U32 checksum;  /* low 32 bits of XXH64; populated only when checksumFlag is set */
} seekEntry_t;
148
/* Parsed seek table: `tableLen` frames plus one sentinel entry holding the
 * running totals, so size computations on the last frame need no special case. */
struct ZSTD_seekTable_s {
    seekEntry_t* entries;  /* tableLen + 1 entries (sentinel at index tableLen) */
    size_t tableLen;       /* number of frames described by the table */

    int checksumFlag;      /* non-zero if entries carry per-frame checksums */
};
155
156 #define SEEKABLE_BUFF_SIZE ZSTD_BLOCKSIZE_MAX
157
/* Full decompression state for a seekable source: the parsed seek table,
 * the user's I/O callbacks, and streaming state carried across calls. */
struct ZSTD_seekable_s {
    ZSTD_DStream* dstream;         /* streaming decompression context, reset per frame */
    ZSTD_seekTable seekTable;      /* per-frame offsets/checksums parsed from the footer */
    ZSTD_seekable_customFile src;  /* user-supplied read/seek callbacks + opaque state */

    U64 decompressedOffset;        /* absolute decompressed position reached so far */
    U32 curFrame;                  /* index of the frame currently being decompressed */

    BYTE inBuff[SEEKABLE_BUFF_SIZE]; /* need to do our own input buffering */
    BYTE outBuff[SEEKABLE_BUFF_SIZE]; /* so we can efficiently decompress the
                                         starts of chunks before we get to the
                                         desired section */
    ZSTD_inBuffer in; /* maintain continuity across ZSTD_seekable_decompress operations */
    buffWrapper_t buffWrapper; /* for `src.opaque` in in-memory mode */

    XXH64_state_t xxhState;        /* running checksum of the current frame's output */
};
175
ZSTD_seekable_create(void)176 ZSTD_seekable* ZSTD_seekable_create(void)
177 {
178 ZSTD_seekable* const zs = (ZSTD_seekable*)malloc(sizeof(ZSTD_seekable));
179 if (zs == NULL) return NULL;
180
181 /* also initializes stage to zsds_init */
182 memset(zs, 0, sizeof(*zs));
183
184 zs->dstream = ZSTD_createDStream();
185 if (zs->dstream == NULL) {
186 free(zs);
187 return NULL;
188 }
189
190 return zs;
191 }
192
/* Releases a seekable object and everything it owns (DStream, seek-table
 * entries). Accepts NULL as a no-op.
 * @return 0 (never fails) */
size_t ZSTD_seekable_free(ZSTD_seekable* zs)
{
    if (zs != NULL) {
        ZSTD_freeDStream(zs->dstream);
        free(zs->seekTable.entries);
        free(zs);
    }
    return 0;
}
201
ZSTD_seekTable_create_fromSeekable(const ZSTD_seekable * zs)202 ZSTD_seekTable* ZSTD_seekTable_create_fromSeekable(const ZSTD_seekable* zs)
203 {
204 ZSTD_seekTable* const st = (ZSTD_seekTable*)malloc(sizeof(ZSTD_seekTable));
205 if (st==NULL) return NULL;
206
207 st->checksumFlag = zs->seekTable.checksumFlag;
208 st->tableLen = zs->seekTable.tableLen;
209
210 /* Allocate an extra entry at the end to match logic of initial allocation */
211 size_t const entriesSize = sizeof(seekEntry_t) * (zs->seekTable.tableLen + 1);
212 seekEntry_t* const entries = (seekEntry_t*)malloc(entriesSize);
213 if (entries==NULL) {
214 free(st);
215 return NULL;
216 }
217
218 memcpy(entries, zs->seekTable.entries, entriesSize);
219 st->entries = entries;
220 return st;
221 }
222
/* Releases a standalone seek table. Accepts NULL as a no-op.
 * @return 0 (never fails) */
size_t ZSTD_seekTable_free(ZSTD_seekTable* st)
{
    if (st != NULL) {
        free(st->entries);
        free(st);
    }
    return 0;
}
230
231 /** ZSTD_seekable_offsetToFrameIndex() :
232 * Performs a binary search to find the last frame with a decompressed offset
233 * <= pos
234 * @return : the frame's index */
ZSTD_seekable_offsetToFrameIndex(const ZSTD_seekable * zs,unsigned long long pos)235 unsigned ZSTD_seekable_offsetToFrameIndex(const ZSTD_seekable* zs, unsigned long long pos)
236 {
237 return ZSTD_seekTable_offsetToFrameIndex(&zs->seekTable, pos);
238 }
239
ZSTD_seekTable_offsetToFrameIndex(const ZSTD_seekTable * st,unsigned long long pos)240 unsigned ZSTD_seekTable_offsetToFrameIndex(const ZSTD_seekTable* st, unsigned long long pos)
241 {
242 U32 lo = 0;
243 U32 hi = (U32)st->tableLen;
244 assert(st->tableLen <= UINT_MAX);
245
246 if (pos >= st->entries[st->tableLen].dOffset) {
247 return (unsigned)st->tableLen;
248 }
249
250 while (lo + 1 < hi) {
251 U32 const mid = lo + ((hi - lo) >> 1);
252 if (st->entries[mid].dOffset <= pos) {
253 lo = mid;
254 } else {
255 hi = mid;
256 }
257 }
258 return lo;
259 }
260
ZSTD_seekable_getNumFrames(const ZSTD_seekable * zs)261 unsigned ZSTD_seekable_getNumFrames(const ZSTD_seekable* zs)
262 {
263 return ZSTD_seekTable_getNumFrames(&zs->seekTable);
264 }
265
ZSTD_seekTable_getNumFrames(const ZSTD_seekTable * st)266 unsigned ZSTD_seekTable_getNumFrames(const ZSTD_seekTable* st)
267 {
268 assert(st->tableLen <= UINT_MAX);
269 return (unsigned)st->tableLen;
270 }
271
ZSTD_seekable_getFrameCompressedOffset(const ZSTD_seekable * zs,unsigned frameIndex)272 unsigned long long ZSTD_seekable_getFrameCompressedOffset(const ZSTD_seekable* zs, unsigned frameIndex)
273 {
274 return ZSTD_seekTable_getFrameCompressedOffset(&zs->seekTable, frameIndex);
275 }
276
ZSTD_seekTable_getFrameCompressedOffset(const ZSTD_seekTable * st,unsigned frameIndex)277 unsigned long long ZSTD_seekTable_getFrameCompressedOffset(const ZSTD_seekTable* st, unsigned frameIndex)
278 {
279 if (frameIndex >= st->tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE;
280 return st->entries[frameIndex].cOffset;
281 }
282
ZSTD_seekable_getFrameDecompressedOffset(const ZSTD_seekable * zs,unsigned frameIndex)283 unsigned long long ZSTD_seekable_getFrameDecompressedOffset(const ZSTD_seekable* zs, unsigned frameIndex)
284 {
285 return ZSTD_seekTable_getFrameDecompressedOffset(&zs->seekTable, frameIndex);
286 }
287
ZSTD_seekTable_getFrameDecompressedOffset(const ZSTD_seekTable * st,unsigned frameIndex)288 unsigned long long ZSTD_seekTable_getFrameDecompressedOffset(const ZSTD_seekTable* st, unsigned frameIndex)
289 {
290 if (frameIndex >= st->tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE;
291 return st->entries[frameIndex].dOffset;
292 }
293
ZSTD_seekable_getFrameCompressedSize(const ZSTD_seekable * zs,unsigned frameIndex)294 size_t ZSTD_seekable_getFrameCompressedSize(const ZSTD_seekable* zs, unsigned frameIndex)
295 {
296 return ZSTD_seekTable_getFrameCompressedSize(&zs->seekTable, frameIndex);
297 }
298
ZSTD_seekTable_getFrameCompressedSize(const ZSTD_seekTable * st,unsigned frameIndex)299 size_t ZSTD_seekTable_getFrameCompressedSize(const ZSTD_seekTable* st, unsigned frameIndex)
300 {
301 if (frameIndex >= st->tableLen) return ERROR(frameIndex_tooLarge);
302 return st->entries[frameIndex + 1].cOffset -
303 st->entries[frameIndex].cOffset;
304 }
305
ZSTD_seekable_getFrameDecompressedSize(const ZSTD_seekable * zs,unsigned frameIndex)306 size_t ZSTD_seekable_getFrameDecompressedSize(const ZSTD_seekable* zs, unsigned frameIndex)
307 {
308 return ZSTD_seekTable_getFrameDecompressedSize(&zs->seekTable, frameIndex);
309 }
310
ZSTD_seekTable_getFrameDecompressedSize(const ZSTD_seekTable * st,unsigned frameIndex)311 size_t ZSTD_seekTable_getFrameDecompressedSize(const ZSTD_seekTable* st, unsigned frameIndex)
312 {
313 if (frameIndex > st->tableLen) return ERROR(frameIndex_tooLarge);
314 return st->entries[frameIndex + 1].dOffset -
315 st->entries[frameIndex].dOffset;
316 }
317
/* Reads and validates the seek table stored at the end of the source
 * (skippable frame + fixed-size footer), then fills zs->seekTable with
 * cumulative compressed/decompressed offsets, one entry per frame plus a
 * trailing sentinel holding the totals.
 * @return 0 on success, or a ZSTD error code */
static size_t ZSTD_seekable_loadSeekTable(ZSTD_seekable* zs)
{
    int checksumFlag;
    ZSTD_seekable_customFile src = zs->src;
    /* read the footer, fixed size */
    CHECK_IO(src.seek(src.opaque, -(int)ZSTD_seekTableFooterSize, SEEK_END));
    CHECK_IO(src.read(src.opaque, zs->inBuff, ZSTD_seekTableFooterSize));

    /* the footer ends with the seekable magic number (bytes 5..8) */
    if (MEM_readLE32(zs->inBuff + 5) != ZSTD_SEEKABLE_MAGICNUMBER) {
        return ERROR(prefix_unknown);
    }

    { BYTE const sfd = zs->inBuff[4];  /* seek table descriptor byte */
        checksumFlag = sfd >> 7;       /* bit 7: entries carry a checksum field */

        /* check reserved bits */
        if ((sfd >> 2) & 0x1f) {
            return ERROR(corruption_detected);
        } }

    { U32 const numFrames = MEM_readLE32(zs->inBuff);
        U32 const sizePerEntry = 8 + (checksumFlag?4:0); /* cSize + dSize (+ checksum) */
        U32 const tableSize = sizePerEntry * numFrames;
        U32 const frameSize = tableSize + ZSTD_seekTableFooterSize + ZSTD_SKIPPABLEHEADERSIZE;

        U32 remaining = frameSize - ZSTD_seekTableFooterSize; /* don't need to re-read footer */
        /* NOTE(review): sizePerEntry * numFrames can wrap U32 for a hostile
         * numFrames; the skippable-frame size cross-check below catches most
         * such inputs, but confirm this is sufficient for untrusted sources. */
        { U32 const toRead = MIN(remaining, SEEKABLE_BUFF_SIZE);
            CHECK_IO(src.seek(src.opaque, -(S64)frameSize, SEEK_END));
            CHECK_IO(src.read(src.opaque, zs->inBuff, toRead));
            remaining -= toRead;
        }

        /* the seek table lives in a skippable frame tagged with nibble 0xE */
        if (MEM_readLE32(zs->inBuff) != (ZSTD_MAGIC_SKIPPABLE_START | 0xE)) {
            return ERROR(prefix_unknown);
        }
        /* the skippable frame's declared content size must agree with the
         * size computed from numFrames */
        if (MEM_readLE32(zs->inBuff+4) + ZSTD_SKIPPABLEHEADERSIZE != frameSize) {
            return ERROR(prefix_unknown);
        }

        { /* Allocate an extra entry at the end so that we can do size
           * computations on the last element without special case */
            seekEntry_t* const entries = (seekEntry_t*)malloc(sizeof(seekEntry_t) * (numFrames + 1));

            U32 idx = 0;
            U32 pos = 8;  /* skip the skippable-frame header already in inBuff */

            U64 cOffset = 0;
            U64 dOffset = 0;

            if (entries == NULL) return ERROR(memory_allocation);

            /* compute cumulative positions */
            for (; idx < numFrames; idx++) {
                if (pos + sizePerEntry > SEEKABLE_BUFF_SIZE) {
                    /* refill: preserve the unparsed tail, then read more
                     * table bytes behind it */
                    U32 const offset = SEEKABLE_BUFF_SIZE - pos;
                    U32 const toRead = MIN(remaining, SEEKABLE_BUFF_SIZE - offset);
                    memmove(zs->inBuff, zs->inBuff + pos, offset); /* move any data we haven't read yet */
                    /* on read failure, free `entries` before returning
                     * (CHECK_IO would leak it) */
                    if (src.read(src.opaque, zs->inBuff+offset, toRead) < 0) {
                        free(entries);
                        return ERROR(seekableIO);
                    }
                    remaining -= toRead;
                    pos = 0;
                }
                entries[idx].cOffset = cOffset;
                entries[idx].dOffset = dOffset;

                cOffset += MEM_readLE32(zs->inBuff + pos);  /* frame's compressed size */
                pos += 4;
                dOffset += MEM_readLE32(zs->inBuff + pos);  /* frame's decompressed size */
                pos += 4;
                if (checksumFlag) {
                    entries[idx].checksum = MEM_readLE32(zs->inBuff + pos);
                    pos += 4;
                }
            }
            /* sentinel entry: holds the totals, so the last frame's size can
             * be computed like any other */
            entries[numFrames].cOffset = cOffset;
            entries[numFrames].dOffset = dOffset;

            zs->seekTable.entries = entries;
            zs->seekTable.tableLen = numFrames;
            zs->seekTable.checksumFlag = checksumFlag;
            return 0;
        }
    }
}
401
/* Initializes a seekable object from an in-memory compressed buffer by
 * wrapping it in a cursor and routing through the generic init path. */
size_t ZSTD_seekable_initBuff(ZSTD_seekable* zs, const void* src, size_t srcSize)
{
    zs->buffWrapper = (buffWrapper_t){src, srcSize, 0};
    {   ZSTD_seekable_customFile const srcFile = {
            &zs->buffWrapper, &ZSTD_seekable_read_buff, &ZSTD_seekable_seek_buff };
        return ZSTD_seekable_initAdvanced(zs, srcFile);
    }
}
410
/* Initializes a seekable object from an open FILE* by wiring up the
 * stdio-based callbacks and routing through the generic init path. */
size_t ZSTD_seekable_initFile(ZSTD_seekable* zs, FILE* src)
{
    ZSTD_seekable_customFile const srcFile = {
        src, &ZSTD_seekable_read_FILE, &ZSTD_seekable_seek_FILE };
    return ZSTD_seekable_initAdvanced(zs, srcFile);
}
417
/* Generic initialization: records the I/O callbacks, loads the seek table,
 * and resets the streaming state.
 * @return 0 on success, or a ZSTD error code */
size_t ZSTD_seekable_initAdvanced(ZSTD_seekable* zs, ZSTD_seekable_customFile src)
{
    zs->src = src;

    {   size_t const loadStatus = ZSTD_seekable_loadSeekTable(zs);
        if (ZSTD_isError(loadStatus)) return loadStatus;
    }

    /* sentinel markers force a fresh seek on the first decompress call */
    zs->decompressedOffset = (U64)-1;
    zs->curFrame = (U32)-1;

    {   size_t const resetStatus = ZSTD_initDStream(zs->dstream);
        if (ZSTD_isError(resetStatus)) return resetStatus;
    }
    return 0;
}
432
/** ZSTD_seekable_decompress() :
 *  Decompresses `len` bytes of the stream starting at decompressed position
 *  `offset` into `dst`, locating the containing frame via the seek table and
 *  discarding frame data that precedes `offset`. Requests extending past
 *  end-of-stream are clamped.
 * @return : number of bytes written to dst, or a ZSTD error code */
size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t len, unsigned long long offset)
{
    unsigned long long const eos = zs->seekTable.entries[zs->seekTable.tableLen].dOffset;
    /* clamp to end-of-stream.
     * NOTE(review): assumes offset <= eos; an offset beyond eos would make
     * `eos - offset` wrap — confirm callers guarantee this. */
    if (offset + len > eos) {
        len = eos - offset;
    }

    U32 targetFrame = ZSTD_seekable_offsetToFrameIndex(zs, offset);
    U32 noOutputProgressCount = 0;  /* consecutive zero-output iterations */
    size_t srcBytesRead = 0;        /* total compressed bytes consumed */
    do {
        /* check if we can continue from a previous decompress job */
        if (targetFrame != zs->curFrame || offset != zs->decompressedOffset) {
            /* cold start: seek to the frame's compressed start and reset
             * the stream, input buffer, and checksum state */
            zs->decompressedOffset = zs->seekTable.entries[targetFrame].dOffset;
            zs->curFrame = targetFrame;

            assert(zs->seekTable.entries[targetFrame].cOffset < LLONG_MAX);
            CHECK_IO(zs->src.seek(zs->src.opaque,
                                  (long long)zs->seekTable.entries[targetFrame].cOffset,
                                  SEEK_SET));
            zs->in = (ZSTD_inBuffer){zs->inBuff, 0, 0};
            XXH64_reset(&zs->xxhState, 0);
            ZSTD_DCtx_reset(zs->dstream, ZSTD_reset_session_only);
            /* in-memory mode sanity check: never consume more compressed
             * bytes than the source buffer holds */
            if (zs->buffWrapper.size && srcBytesRead > zs->buffWrapper.size) {
                return ERROR(seekableIO);
            }
        }

        while (zs->decompressedOffset < offset + len) {
            size_t toRead;
            ZSTD_outBuffer outTmp;
            size_t prevOutPos;
            size_t prevInPos;
            size_t forwardProgress;
            if (zs->decompressedOffset < offset) {
                /* dummy decompressions until we get to the target offset */
                outTmp = (ZSTD_outBuffer){zs->outBuff, MIN(SEEKABLE_BUFF_SIZE, offset - zs->decompressedOffset), 0};
            } else {
                /* real output: pos within dst mirrors distance past offset */
                outTmp = (ZSTD_outBuffer){dst, len, zs->decompressedOffset - offset};
            }

            prevOutPos = outTmp.pos;
            prevInPos = zs->in.pos;
            toRead = ZSTD_decompressStream(zs->dstream, &outTmp, &zs->in);
            if (ZSTD_isError(toRead)) {
                return toRead;
            }

            /* fold this iteration's output into the frame checksum */
            if (zs->seekTable.checksumFlag) {
                XXH64_update(&zs->xxhState, (BYTE*)outTmp.dst + prevOutPos,
                             outTmp.pos - prevOutPos);
            }
            forwardProgress = outTmp.pos - prevOutPos;
            if (forwardProgress == 0) {
                /* bail out of streams that repeatedly yield no output */
                if (noOutputProgressCount++ > ZSTD_SEEKABLE_NO_OUTPUT_PROGRESS_MAX) {
                    return ERROR(seekableIO);
                }
            } else {
                noOutputProgressCount = 0;
            }
            zs->decompressedOffset += forwardProgress;
            srcBytesRead += zs->in.pos - prevInPos;

            if (toRead == 0) {
                /* frame complete */

                /* verify checksum */
                if (zs->seekTable.checksumFlag &&
                    (XXH64_digest(&zs->xxhState) & 0xFFFFFFFFU) !=
                        zs->seekTable.entries[targetFrame].checksum) {
                    return ERROR(corruption_detected);
                }

                if (zs->decompressedOffset < offset + len) {
                    /* go back to the start and force a reset of the stream */
                    targetFrame = ZSTD_seekable_offsetToFrameIndex(zs, zs->decompressedOffset);
                    /* in this case it will fail later with corruption_detected, since last block does not have checksum */
                    assert(targetFrame != zs->seekTable.tableLen);
                }
                break;
            }

            /* read in more data if we're done with this buffer */
            if (zs->in.pos == zs->in.size) {
                toRead = MIN(toRead, SEEKABLE_BUFF_SIZE);
                CHECK_IO(zs->src.read(zs->src.opaque, zs->inBuff, toRead));
                zs->in.size = toRead;
                zs->in.pos = 0;
            }
        } /* while (zs->decompressedOffset < offset + len) */
    } while (zs->decompressedOffset != offset + len);

    return len;
}
527
/* Decompresses one whole frame into dst.
 * @return the frame's decompressed size, or an error code if frameIndex is
 * out of range or dstSize is too small to hold the frame */
size_t ZSTD_seekable_decompressFrame(ZSTD_seekable* zs, void* dst, size_t dstSize, unsigned frameIndex)
{
    seekEntry_t const* entries;
    size_t frameSize;

    if (frameIndex >= zs->seekTable.tableLen) {
        return ERROR(frameIndex_tooLarge);
    }

    /* frame size = distance between consecutive cumulative offsets;
     * the sentinel entry makes this valid for the last frame */
    entries = zs->seekTable.entries;
    frameSize = (size_t)(entries[frameIndex + 1].dOffset - entries[frameIndex].dOffset);
    if (dstSize < frameSize) {
        return ERROR(dstSize_tooSmall);
    }

    return ZSTD_seekable_decompress(zs, dst, frameSize, entries[frameIndex].dOffset);
}
545