/**
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of https://github.com/facebook/zstd.
 * An additional grant of patent rights can be found in the PATENTS file in the
 * same directory.
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation. This program is dual-licensed; you may select
 * either version 2 of the GNU General Public License ("GPL") or BSD license
 * ("BSD").
 */

/*-*************************************
*  Dependencies
***************************************/
#include "fse.h"
#include "huf.h"
#include "mem.h"
#include "zstd_internal.h" /* includes zstd.h */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h> /* memset */

/*-*************************************
*  Constants
***************************************/
static const U32 g_searchStrength = 8; /* control skip over incompressible data */
#define HASH_READ_SIZE 8
typedef enum { ZSTDcs_created = 0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;

/*-*************************************
*  Helper functions
***************************************/
size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; }
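/*
 * Illustrative usage sketch (not part of the original file): a caller sizes
 * its destination buffer from the worst-case bound, so that even an
 * incompressible input can still be stored as raw blocks without overflow.
 *
 *	size_t const dstCapacity = ZSTD_compressBound(srcSize);
 *	(allocate dst with at least dstCapacity bytes, then compress)
 */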

/*-*************************************
*  Sequence storage
***************************************/
static void ZSTD_resetSeqStore(seqStore_t *ssPtr)
{
	ssPtr->lit = ssPtr->litStart;
	ssPtr->sequences = ssPtr->sequencesStart;
	ssPtr->longLengthID = 0;
}

/*-*************************************
*  Context memory management
***************************************/
struct ZSTD_CCtx_s {
	const BYTE *nextSrc;  /* next block here to continue on curr prefix */
	const BYTE *base;     /* All regular indexes relative to this position */
	const BYTE *dictBase; /* extDict indexes relative to this position */
	U32 dictLimit;        /* below that point, need extDict */
	U32 lowLimit;         /* below that point, no more data */
	U32 nextToUpdate;     /* index from which to continue dictionary update */
	U32 nextToUpdate3;    /* index from which to continue dictionary update */
	U32 hashLog3;         /* dispatch table : larger == faster, more memory */
	U32 loadedDictEnd;    /* index of end of dictionary */
	U32 forceWindow;      /* force back-references to respect limit of 1<<wLog, even for dictionary */
	U32 forceRawDict;     /* Force loading dictionary in "content-only" mode (no header analysis) */
	ZSTD_compressionStage_e stage;
	U32 rep[ZSTD_REP_NUM];
	U32 repToConfirm[ZSTD_REP_NUM];
	U32 dictID;
	ZSTD_parameters params;
	void *workSpace;
	size_t workSpaceSize;
	size_t blockSize;
	U64 frameContentSize;
	struct xxh64_state xxhState;
	ZSTD_customMem customMem;

	seqStore_t seqStore; /* sequences storage ptrs */
	U32 *hashTable;
	U32 *hashTable3;
	U32 *chainTable;
	HUF_CElt *hufTable;
	U32 flagStaticTables;
	HUF_repeat flagStaticHufTable;
	FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
	FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
	FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
	unsigned tmpCounters[HUF_COMPRESS_WORKSPACE_SIZE_U32];
};

size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams)
{
	size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog);
	U32 const divider = (cParams.searchLength == 3) ? 3 : 4;
	size_t const maxNbSeq = blockSize / divider;
	size_t const tokenSpace = blockSize + 11 * maxNbSeq;
	size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog);
	size_t const hSize = ((size_t)1) << cParams.hashLog;
	U32 const hashLog3 = (cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
	size_t const h3Size = ((size_t)1) << hashLog3;
	size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
	size_t const optSpace =
	    ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
	size_t const workspaceSize = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
				     (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);

	return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_CCtx)) + ZSTD_ALIGN(workspaceSize);
}

static ZSTD_CCtx *ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
{
	ZSTD_CCtx *cctx;
	if (!customMem.customAlloc || !customMem.customFree)
		return NULL;
	cctx = (ZSTD_CCtx *)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
	if (!cctx)
		return NULL;
	memset(cctx, 0, sizeof(ZSTD_CCtx));
	cctx->customMem = customMem;
	return cctx;
}

ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize)
{
	ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
	ZSTD_CCtx *cctx = ZSTD_createCCtx_advanced(stackMem);
	if (cctx) {
		cctx->workSpace = ZSTD_stackAllocAll(cctx->customMem.opaque, &cctx->workSpaceSize);
	}
	return cctx;
}
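/*
 * Illustrative calling sequence (not part of the original file), assuming a
 * caller that owns a buffer `wksp` of `wkspSize` bytes and validated
 * parameters `params`:
 *
 *	size_t const need = ZSTD_CCtxWorkspaceBound(params.cParams);
 *	ZSTD_CCtx *cctx = (wkspSize >= need) ? ZSTD_initCCtx(wksp, wkspSize) : NULL;
 *
 * ZSTD_initCCtx() carves both the context structure and its workspace out of
 * the single caller-provided allocation via the stack allocator.
 */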

size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx)
{
	if (cctx == NULL)
		return 0; /* support free on NULL */
	ZSTD_free(cctx->workSpace, cctx->customMem);
	ZSTD_free(cctx, cctx->customMem);
	return 0; /* reserved as a potential error code in the future */
}

const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx) /* hidden interface */ { return &(ctx->seqStore); }

static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx *cctx) { return cctx->params; }

/** ZSTD_checkCParams() :
	ensure param values remain within authorized range.
	@return : 0, or an error code if one value is beyond authorized range */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
#define CLAMPCHECK(val, min, max)                                       \
	{                                                               \
		if ((val < min) | (val > max))                          \
			return ERROR(compressionParameter_unsupported); \
	}
	CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
	CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
	CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
	CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
	CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
	CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
	if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2)
		return ERROR(compressionParameter_unsupported);
	return 0;
}

/** ZSTD_cycleLog() :
 *  condition for correct operation : hashLog > 1 */
static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
{
	U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
	return hashLog - btScale;
}

/** ZSTD_adjustCParams() :
	optimize `cPar` for a given input (`srcSize` and `dictSize`).
	mostly downsizing to reduce memory consumption and initialization.
	Both `srcSize` and `dictSize` are optional (use 0 if unknown),
	but if both are 0, no optimization can be done.
	Note : cPar is considered validated at this stage. Use ZSTD_checkCParams() to ensure this. */
ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
{
	if (srcSize + dictSize == 0)
		return cPar; /* no size information available : no adjustment */

	/* resize params, to use less memory when necessary */
	{
		U32 const minSrcSize = (srcSize == 0) ? 500 : 0;
		U64 const rSize = srcSize + dictSize + minSrcSize;
		if (rSize < ((U64)1 << ZSTD_WINDOWLOG_MAX)) {
			U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1);
			if (cPar.windowLog > srcLog)
				cPar.windowLog = srcLog;
		}
	}
	if (cPar.hashLog > cPar.windowLog)
		cPar.hashLog = cPar.windowLog;
	{
		U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
		if (cycleLog > cPar.windowLog)
			cPar.chainLog -= (cycleLog - cPar.windowLog);
	}

	if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
		cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */

	return cPar;
}
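/*
 * Worked example (illustrative, not part of the original file): for
 * srcSize = 10 KB, dictSize = 0 and a requested windowLog of 23, rSize is
 * 10240, so srcLog = ZSTD_highbit32(10239) + 1 = 13 + 1 = 14 and windowLog
 * shrinks to 14: the window still covers the whole input, while the hash and
 * chain tables shrink accordingly.
 */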

static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2)
{
	return (param1.cParams.hashLog == param2.cParams.hashLog) & (param1.cParams.chainLog == param2.cParams.chainLog) &
	       (param1.cParams.strategy == param2.cParams.strategy) & ((param1.cParams.searchLength == 3) == (param2.cParams.searchLength == 3));
}

/*! ZSTD_continueCCtx() :
	reuse CCtx without reset (note : requires no dictionary) */
static size_t ZSTD_continueCCtx(ZSTD_CCtx *cctx, ZSTD_parameters params, U64 frameContentSize)
{
	U32 const end = (U32)(cctx->nextSrc - cctx->base);
	cctx->params = params;
	cctx->frameContentSize = frameContentSize;
	cctx->lowLimit = end;
	cctx->dictLimit = end;
	cctx->nextToUpdate = end + 1;
	cctx->stage = ZSTDcs_init;
	cctx->dictID = 0;
	cctx->loadedDictEnd = 0;
	{
		int i;
		for (i = 0; i < ZSTD_REP_NUM; i++)
			cctx->rep[i] = repStartValue[i];
	}
	cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */
	xxh64_reset(&cctx->xxhState, 0);
	return 0;
}

typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e;

/*! ZSTD_resetCCtx_advanced() :
	note : `params` must be validated */
static size_t ZSTD_resetCCtx_advanced(ZSTD_CCtx *zc, ZSTD_parameters params, U64 frameContentSize, ZSTD_compResetPolicy_e const crp)
{
	if (crp == ZSTDcrp_continue)
		if (ZSTD_equivalentParams(params, zc->params)) {
			zc->flagStaticTables = 0;
			zc->flagStaticHufTable = HUF_repeat_none;
			return ZSTD_continueCCtx(zc, params, frameContentSize);
		}

	{
		size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
		U32 const divider = (params.cParams.searchLength == 3) ? 3 : 4;
		size_t const maxNbSeq = blockSize / divider;
		size_t const tokenSpace = blockSize + 11 * maxNbSeq;
		size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog);
		size_t const hSize = ((size_t)1) << params.cParams.hashLog;
		U32 const hashLog3 = (params.cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
		size_t const h3Size = ((size_t)1) << hashLog3;
		size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
		void *ptr;

		/* Check if workSpace is large enough, alloc a new one if needed */
		{
			size_t const optSpace = ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) +
						(ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
			size_t const neededSpace = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
						   (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
			if (zc->workSpaceSize < neededSpace) {
				ZSTD_free(zc->workSpace, zc->customMem);
				zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
				if (zc->workSpace == NULL)
					return ERROR(memory_allocation);
				zc->workSpaceSize = neededSpace;
			}
		}

		if (crp != ZSTDcrp_noMemset)
			memset(zc->workSpace, 0, tableSpace); /* reset tables only */
		xxh64_reset(&zc->xxhState, 0);
		zc->hashLog3 = hashLog3;
		zc->hashTable = (U32 *)(zc->workSpace);
		zc->chainTable = zc->hashTable + hSize;
		zc->hashTable3 = zc->chainTable + chainSize;
		ptr = zc->hashTable3 + h3Size;
		zc->hufTable = (HUF_CElt *)ptr;
		zc->flagStaticTables = 0;
		zc->flagStaticHufTable = HUF_repeat_none;
		ptr = ((U32 *)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */

		zc->nextToUpdate = 1;
		zc->nextSrc = NULL;
		zc->base = NULL;
		zc->dictBase = NULL;
		zc->dictLimit = 0;
		zc->lowLimit = 0;
		zc->params = params;
		zc->blockSize = blockSize;
		zc->frameContentSize = frameContentSize;
		{
			int i;
			for (i = 0; i < ZSTD_REP_NUM; i++)
				zc->rep[i] = repStartValue[i];
		}

		if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) {
			zc->seqStore.litFreq = (U32 *)ptr;
			zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1 << Litbits);
			zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL + 1);
			zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML + 1);
			ptr = zc->seqStore.offCodeFreq + (MaxOff + 1);
			zc->seqStore.matchTable = (ZSTD_match_t *)ptr;
			ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM + 1;
			zc->seqStore.priceTable = (ZSTD_optimal_t *)ptr;
			ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM + 1;
			zc->seqStore.litLengthSum = 0;
		}
		zc->seqStore.sequencesStart = (seqDef *)ptr;
		ptr = zc->seqStore.sequencesStart + maxNbSeq;
		zc->seqStore.llCode = (BYTE *)ptr;
		zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
		zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
		zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;

		zc->stage = ZSTDcs_init;
		zc->dictID = 0;
		zc->loadedDictEnd = 0;

		return 0;
	}
}

/* ZSTD_invalidateRepCodes() :
 * ensures next compression will not use repcodes from previous block.
 * Note : only works with regular variant;
 *        do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx)
{
	int i;
	for (i = 0; i < ZSTD_REP_NUM; i++)
		cctx->rep[i] = 0;
}

/*! ZSTD_copyCCtx() :
*   Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
*   Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
*   @return : 0, or an error code */
size_t ZSTD_copyCCtx(ZSTD_CCtx *dstCCtx, const ZSTD_CCtx *srcCCtx, unsigned long long pledgedSrcSize)
{
	if (srcCCtx->stage != ZSTDcs_init)
		return ERROR(stage_wrong);

	memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
	{
		ZSTD_parameters params = srcCCtx->params;
		params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
		ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset);
	}

	/* copy tables */
	{
		size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
		size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
		size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
		size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
		memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace);
	}

	/* copy dictionary offsets */
	dstCCtx->nextToUpdate = srcCCtx->nextToUpdate;
	dstCCtx->nextToUpdate3 = srcCCtx->nextToUpdate3;
	dstCCtx->nextSrc = srcCCtx->nextSrc;
	dstCCtx->base = srcCCtx->base;
	dstCCtx->dictBase = srcCCtx->dictBase;
	dstCCtx->dictLimit = srcCCtx->dictLimit;
	dstCCtx->lowLimit = srcCCtx->lowLimit;
	dstCCtx->loadedDictEnd = srcCCtx->loadedDictEnd;
	dstCCtx->dictID = srcCCtx->dictID;

	/* copy entropy tables */
	dstCCtx->flagStaticTables = srcCCtx->flagStaticTables;
	dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable;
	if (srcCCtx->flagStaticTables) {
		memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable));
		memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable));
		memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable));
	}
	if (srcCCtx->flagStaticHufTable) {
		memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256 * 4);
	}

	return 0;
}

/*! ZSTD_reduceTable() :
*   reduce table indexes by `reducerValue` */
static void ZSTD_reduceTable(U32 *const table, U32 const size, U32 const reducerValue)
{
	U32 u;
	for (u = 0; u < size; u++) {
		if (table[u] < reducerValue)
			table[u] = 0;
		else
			table[u] -= reducerValue;
	}
}

/*! ZSTD_reduceIndex() :
*   rescale all indexes to avoid future overflow (indexes are U32) */
static void ZSTD_reduceIndex(ZSTD_CCtx *zc, const U32 reducerValue)
{
	{
		U32 const hSize = 1 << zc->params.cParams.hashLog;
		ZSTD_reduceTable(zc->hashTable, hSize, reducerValue);
	}

	{
		U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog);
		ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue);
	}

	{
		U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0;
		ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue);
	}
}
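/*
 * Worked example (illustrative, not part of the original file): with
 * reducerValue = 0x01000000, an entry 0x01000404 becomes 0x00000404 while an
 * older entry 0x00FFFFF0 is cleared to 0; distances between surviving
 * entries, and therefore match offsets, are unchanged.
 */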

/*-*******************************************************
*  Block entropic compression
*********************************************************/

/* See doc/zstd_compression_format.md for detailed format description */

size_t ZSTD_noCompressBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	if (srcSize + ZSTD_blockHeaderSize > dstCapacity)
		return ERROR(dstSize_tooSmall);
	memcpy((BYTE *)dst + ZSTD_blockHeaderSize, src, srcSize);
	ZSTD_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
	return ZSTD_blockHeaderSize + srcSize;
}

static size_t ZSTD_noCompressLiterals(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	BYTE *const ostart = (BYTE * const)dst;
	U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);

	if (srcSize + flSize > dstCapacity)
		return ERROR(dstSize_tooSmall);

	switch (flSize) {
	case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_basic + (srcSize << 3)); break;
	case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_basic + (1 << 2) + (srcSize << 4))); break;
	default: /* note : should not be necessary : flSize is within {1,2,3} */
	case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_basic + (3 << 2) + (srcSize << 4))); break;
	}

	memcpy(ostart + flSize, src, srcSize);
	return srcSize + flSize;
}

static size_t ZSTD_compressRleLiteralsBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	BYTE *const ostart = (BYTE * const)dst;
	U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);

	(void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */

	switch (flSize) {
	case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_rle + (srcSize << 3)); break;
	case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_rle + (1 << 2) + (srcSize << 4))); break;
	default: /* note : should not be necessary : flSize is necessarily within {1,2,3} */
	case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_rle + (3 << 2) + (srcSize << 4))); break;
	}

	ostart[flSize] = *(const BYTE *)src;
	return flSize + 1;
}

static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
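/*
 * Worked example (illustrative, not part of the original file): for a 4 KB
 * block, ZSTD_minGain(4096) = (4096 >> 6) + 2 = 66, so entropy-coded output
 * is kept only if it saves at least 66 bytes versus storing the data raw.
 */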

static size_t ZSTD_compressLiterals(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	size_t const minGain = ZSTD_minGain(srcSize);
	size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
	BYTE *const ostart = (BYTE *)dst;
	U32 singleStream = srcSize < 256;
	symbolEncodingType_e hType = set_compressed;
	size_t cLitSize;

/* small ? don't even attempt compression (speed opt) */
#define LITERAL_NOENTROPY 63
	{
		size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
		if (srcSize <= minLitSize)
			return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
	}

	if (dstCapacity < lhSize + 1)
		return ERROR(dstSize_tooSmall); /* not enough space for compression */
	{
		HUF_repeat repeat = zc->flagStaticHufTable;
		int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
		if (repeat == HUF_repeat_valid && lhSize == 3)
			singleStream = 1;
		cLitSize = singleStream ? HUF_compress1X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
								sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat)
					: HUF_compress4X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
								sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat);
		if (repeat != HUF_repeat_none) {
			hType = set_repeat; /* reused the existing table */
		} else {
			zc->flagStaticHufTable = HUF_repeat_check; /* now have a table to reuse */
		}
	}

	if ((cLitSize == 0) | (cLitSize >= srcSize - minGain)) {
		zc->flagStaticHufTable = HUF_repeat_none;
		return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
	}
	if (cLitSize == 1) {
		zc->flagStaticHufTable = HUF_repeat_none;
		return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
	}

	/* Build header */
	switch (lhSize) {
	case 3: /* 2 - 2 - 10 - 10 */
	{
		U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 14);
		ZSTD_writeLE24(ostart, lhc);
		break;
	}
	case 4: /* 2 - 2 - 14 - 14 */
	{
		U32 const lhc = hType + (2 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 18);
		ZSTD_writeLE32(ostart, lhc);
		break;
	}
	default: /* should not be necessary, lhSize is only {3,4,5} */
	case 5:  /* 2 - 2 - 18 - 18 */
	{
		U32 const lhc = hType + (3 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 22);
		ZSTD_writeLE32(ostart, lhc);
		ostart[4] = (BYTE)(cLitSize >> 10);
		break;
	}
	}
	return lhSize + cLitSize;
}

static const BYTE LL_Code[64] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18,
				 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
				 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24};

static const BYTE ML_Code[128] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
				  26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38,
				  38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
				  40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42,
				  42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42};

void ZSTD_seqToCodes(const seqStore_t *seqStorePtr)
{
	BYTE const LL_deltaCode = 19;
	BYTE const ML_deltaCode = 36;
	const seqDef *const sequences = seqStorePtr->sequencesStart;
	BYTE *const llCodeTable = seqStorePtr->llCode;
	BYTE *const ofCodeTable = seqStorePtr->ofCode;
	BYTE *const mlCodeTable = seqStorePtr->mlCode;
	U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
	U32 u;
	for (u = 0; u < nbSeq; u++) {
		U32 const llv = sequences[u].litLength;
		U32 const mlv = sequences[u].matchLength;
		llCodeTable[u] = (llv > 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv];
		ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
		mlCodeTable[u] = (mlv > 127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv];
	}
	if (seqStorePtr->longLengthID == 1)
		llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
	if (seqStorePtr->longLengthID == 2)
		mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}
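/*
 * Worked example (illustrative, not part of the original file): a literal
 * length of 1000 exceeds 63, so its code is ZSTD_highbit32(1000) +
 * LL_deltaCode = 9 + 19 = 28; a match length value of 150 exceeds 127, so its
 * code is ZSTD_highbit32(150) + ML_deltaCode = 7 + 36 = 43. Smaller values
 * map directly through LL_Code[] and ML_Code[].
 */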

ZSTD_STATIC size_t ZSTD_compressSequences_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity)
{
	const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN;
	const seqStore_t *seqStorePtr = &(zc->seqStore);
	FSE_CTable *CTable_LitLength = zc->litlengthCTable;
	FSE_CTable *CTable_OffsetBits = zc->offcodeCTable;
	FSE_CTable *CTable_MatchLength = zc->matchlengthCTable;
	U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
	const seqDef *const sequences = seqStorePtr->sequencesStart;
	const BYTE *const ofCodeTable = seqStorePtr->ofCode;
	const BYTE *const llCodeTable = seqStorePtr->llCode;
	const BYTE *const mlCodeTable = seqStorePtr->mlCode;
	BYTE *const ostart = (BYTE *)dst;
	BYTE *const oend = ostart + dstCapacity;
	BYTE *op = ostart;
	size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
	BYTE *seqHead;

	U32 *count;
	S16 *norm;
	U32 *workspace;
	size_t workspaceSize = sizeof(zc->tmpCounters);
	{
		size_t spaceUsed32 = 0;
		count = (U32 *)zc->tmpCounters + spaceUsed32;
		spaceUsed32 += MaxSeq + 1;
		norm = (S16 *)((U32 *)zc->tmpCounters + spaceUsed32);
		spaceUsed32 += ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;

		workspace = (U32 *)zc->tmpCounters + spaceUsed32;
		workspaceSize -= (spaceUsed32 << 2);
	}

	/* Compress literals */
	{
		const BYTE *const literals = seqStorePtr->litStart;
		size_t const litSize = seqStorePtr->lit - literals;
		size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
		if (ZSTD_isError(cSize))
			return cSize;
		op += cSize;
	}

	/* Sequences Header */
	if ((oend - op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */)
		return ERROR(dstSize_tooSmall);
	if (nbSeq < 0x7F)
		*op++ = (BYTE)nbSeq;
	else if (nbSeq < LONGNBSEQ)
		op[0] = (BYTE)((nbSeq >> 8) + 0x80), op[1] = (BYTE)nbSeq, op += 2;
	else
		op[0] = 0xFF, ZSTD_writeLE16(op + 1, (U16)(nbSeq - LONGNBSEQ)), op += 3;
	if (nbSeq == 0)
		return op - ostart;

	/* seqHead : flags for FSE encoding type */
	seqHead = op++;

#define MIN_SEQ_FOR_DYNAMIC_FSE 64
#define MAX_SEQ_FOR_STATIC_FSE 1000

	/* convert length/distances into codes */
	ZSTD_seqToCodes(seqStorePtr);

	/* CTable for Literal Lengths */
	{
		U32 max = MaxLL;
		size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);
		if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
			*op++ = llCodeTable[0];
			FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
			LLtype = set_rle;
		} else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
			LLtype = set_repeat;
		} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog - 1)))) {
			FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, workspace, workspaceSize);
			LLtype = set_basic;
		} else {
			size_t nbSeq_1 = nbSeq;
			const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
			if (count[llCodeTable[nbSeq - 1]] > 1) {
				count[llCodeTable[nbSeq - 1]]--;
				nbSeq_1--;
			}
			FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
			{
				size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
				if (FSE_isError(NCountSize))
					return NCountSize;
				op += NCountSize;
			}
			FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, workspace, workspaceSize);
			LLtype = set_compressed;
		}
	}

	/* CTable for Offsets */
	{
		U32 max = MaxOff;
		size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);
		if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
			*op++ = ofCodeTable[0];
			FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
			Offtype = set_rle;
		} else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
			Offtype = set_repeat;
		} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog - 1)))) {
			FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, workspace, workspaceSize);
			Offtype = set_basic;
		} else {
			size_t nbSeq_1 = nbSeq;
			const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
			if (count[ofCodeTable[nbSeq - 1]] > 1) {
				count[ofCodeTable[nbSeq - 1]]--;
				nbSeq_1--;
			}
			FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
			{
				size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
				if (FSE_isError(NCountSize))
					return NCountSize;
				op += NCountSize;
			}
			FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, workspace, workspaceSize);
			Offtype = set_compressed;
		}
	}

	/* CTable for MatchLengths */
	{
		U32 max = MaxML;
		size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);
		if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
			*op++ = *mlCodeTable;
			FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
			MLtype = set_rle;
		} else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
			MLtype = set_repeat;
		} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog - 1)))) {
			FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, workspace, workspaceSize);
			MLtype = set_basic;
		} else {
			size_t nbSeq_1 = nbSeq;
			const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
			if (count[mlCodeTable[nbSeq - 1]] > 1) {
				count[mlCodeTable[nbSeq - 1]]--;
				nbSeq_1--;
			}
			FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
			{
				size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
				if (FSE_isError(NCountSize))
					return NCountSize;
				op += NCountSize;
			}
			FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, workspace, workspaceSize);
			MLtype = set_compressed;
		}
	}

	*seqHead = (BYTE)((LLtype << 6) + (Offtype << 4) + (MLtype << 2));
	zc->flagStaticTables = 0;

	/* Encoding Sequences */
	{
		BIT_CStream_t blockStream;
		FSE_CState_t stateMatchLength;
		FSE_CState_t stateOffsetBits;
		FSE_CState_t stateLitLength;

		CHECK_E(BIT_initCStream(&blockStream, op, oend - op), dstSize_tooSmall); /* not enough space remaining */

		/* first symbols */
		FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]);
		FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]);
		FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]);
		BIT_addBits(&blockStream, sequences[nbSeq - 1].litLength, LL_bits[llCodeTable[nbSeq - 1]]);
		if (ZSTD_32bits())
			BIT_flushBits(&blockStream);
		BIT_addBits(&blockStream, sequences[nbSeq - 1].matchLength, ML_bits[mlCodeTable[nbSeq - 1]]);
		if (ZSTD_32bits())
			BIT_flushBits(&blockStream);
		if (longOffsets) {
			U32 const ofBits = ofCodeTable[nbSeq - 1];
			int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
			if (extraBits) {
				BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, extraBits);
				BIT_flushBits(&blockStream);
			}
			BIT_addBits(&blockStream, sequences[nbSeq - 1].offset >> extraBits, ofBits - extraBits);
		} else {
			BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, ofCodeTable[nbSeq - 1]);
		}
		BIT_flushBits(&blockStream);

		{
			size_t n;
			for (n = nbSeq - 2; n < nbSeq; n--) { /* intentional underflow */
				BYTE const llCode = llCodeTable[n];
				BYTE const ofCode = ofCodeTable[n];
				BYTE const mlCode = mlCodeTable[n];
				U32 const llBits = LL_bits[llCode];
				U32 const ofBits = ofCode; /* 32b*/ /* 64b*/
				U32 const mlBits = ML_bits[mlCode];
				/* (7)*/							    /* (7)*/
				FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */  /* 15 */
				FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
				if (ZSTD_32bits())
					BIT_flushBits(&blockStream);				  /* (7)*/
				FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
				if (ZSTD_32bits() || (ofBits + mlBits + llBits >= 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
					BIT_flushBits(&blockStream); /* (7)*/
				BIT_addBits(&blockStream, sequences[n].litLength, llBits);
				if (ZSTD_32bits() && ((llBits + mlBits) > 24))
					BIT_flushBits(&blockStream);
				BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
				if (ZSTD_32bits())
					BIT_flushBits(&blockStream); /* (7)*/
				if (longOffsets) {
					int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
					if (extraBits) {
						BIT_addBits(&blockStream, sequences[n].offset, extraBits);
						BIT_flushBits(&blockStream); /* (7)*/
					}
					BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */
				} else {
					BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
				}
				BIT_flushBits(&blockStream); /* (7)*/
			}
		}

		FSE_flushCState(&blockStream, &stateMatchLength);
		FSE_flushCState(&blockStream, &stateOffsetBits);
		FSE_flushCState(&blockStream, &stateLitLength);

		{
			size_t const streamSize = BIT_closeCStream(&blockStream);
			if (streamSize == 0)
				return ERROR(dstSize_tooSmall); /* not enough space */
			op += streamSize;
		}
	}
	return op - ostart;
}

ZSTD_STATIC size_t ZSTD_compressSequences(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, size_t srcSize)
{
	size_t const cSize = ZSTD_compressSequences_internal(zc, dst, dstCapacity);
	size_t const minGain = ZSTD_minGain(srcSize);
	size_t const maxCSize = srcSize - minGain;
	/* If srcSize <= dstCapacity, then there is enough space to write a
	 * raw uncompressed block. Since we ran out of space, the block must not
	 * be compressible, so fall back to a raw uncompressed block.
	 */
	int const uncompressibleError = cSize == ERROR(dstSize_tooSmall) && srcSize <= dstCapacity;
	int i;

	if (ZSTD_isError(cSize) && !uncompressibleError)
		return cSize;
	if (cSize >= maxCSize || uncompressibleError) {
		zc->flagStaticHufTable = HUF_repeat_none;
		return 0;
	}
	/* confirm repcodes */
	for (i = 0; i < ZSTD_REP_NUM; i++)
		zc->rep[i] = zc->repToConfirm[i];
	return cSize;
}
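/*
 * Worked example (illustrative, not part of the original file): for
 * srcSize = 1000, minGain = (1000 >> 6) + 2 = 17 and maxCSize = 983; a cSize
 * of 990 is rejected (return 0) and the caller emits a raw block instead.
 * Note that repcodes are confirmed only when a compressed block is kept.
 */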

/*! ZSTD_storeSeq() :
	Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
	`offsetCode` : distance to match, or 0 == repCode.
	`matchCode` : matchLength - MINMATCH
*/
ZSTD_STATIC void ZSTD_storeSeq(seqStore_t *seqStorePtr, size_t litLength, const void *literals, U32 offsetCode, size_t matchCode)
{
	/* copy Literals */
	ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
	seqStorePtr->lit += litLength;

	/* literal Length */
	if (litLength > 0xFFFF) {
		seqStorePtr->longLengthID = 1;
		seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
	}
	seqStorePtr->sequences[0].litLength = (U16)litLength;

	/* match offset */
	seqStorePtr->sequences[0].offset = offsetCode + 1;

	/* match Length */
	if (matchCode > 0xFFFF) {
		seqStorePtr->longLengthID = 2;
		seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
	}
	seqStorePtr->sequences[0].matchLength = (U16)matchCode;

	seqStorePtr->sequences++;
}

/*-*************************************
*  Match length counter
***************************************/
static unsigned ZSTD_NbCommonBytes(register size_t val)
{
	if (ZSTD_isLittleEndian()) {
		if (ZSTD_64bits()) {
			return (__builtin_ctzll((U64)val) >> 3);
		} else { /* 32 bits */
			return (__builtin_ctz((U32)val) >> 3);
		}
	} else { /* Big Endian CPU */
		if (ZSTD_64bits()) {
			return (__builtin_clzll(val) >> 3);
		} else { /* 32 bits */
			return (__builtin_clz((U32)val) >> 3);
		}
	}
}
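/*
 * Worked example (illustrative, not part of the original file): on a
 * little-endian 64-bit CPU, if two 8-byte words first differ in byte 3, the
 * XOR has all of bits 0..23 clear and some bit in 24..31 set, so
 * __builtin_ctzll() returns a value in 24..31 and the >> 3 yields 3 common
 * bytes.
 */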

static size_t ZSTD_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *const pInLimit)
{
	const BYTE *const pStart = pIn;
	const BYTE *const pInLoopLimit = pInLimit - (sizeof(size_t) - 1);

	while (pIn < pInLoopLimit) {
		size_t const diff = ZSTD_readST(pMatch) ^ ZSTD_readST(pIn);
		if (!diff) {
			pIn += sizeof(size_t);
			pMatch += sizeof(size_t);
			continue;
		}
		pIn += ZSTD_NbCommonBytes(diff);
		return (size_t)(pIn - pStart);
	}
	if (ZSTD_64bits())
		if ((pIn < (pInLimit - 3)) && (ZSTD_read32(pMatch) == ZSTD_read32(pIn))) {
			pIn += 4;
			pMatch += 4;
		}
	if ((pIn < (pInLimit - 1)) && (ZSTD_read16(pMatch) == ZSTD_read16(pIn))) {
		pIn += 2;
		pMatch += 2;
	}
	if ((pIn < pInLimit) && (*pMatch == *pIn))
		pIn++;
	return (size_t)(pIn - pStart);
}

/** ZSTD_count_2segments() :
*   can count match length with `ip` & `match` in 2 different segments.
*   convention : on reaching mEnd, match count continues starting from iStart
*/
static size_t ZSTD_count_2segments(const BYTE *ip, const BYTE *match, const BYTE *iEnd, const BYTE *mEnd, const BYTE *iStart)
{
	const BYTE *const vEnd = MIN(ip + (mEnd - match), iEnd);
	size_t const matchLength = ZSTD_count(ip, match, vEnd);
	if (match + matchLength != mEnd)
		return matchLength;
	return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd);
}
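/*
 * Worked example (illustrative, not part of the original file): if `match`
 * sits 5 bytes before mEnd and the data keeps matching past that boundary,
 * the first ZSTD_count() reports 5, counting then restarts with the match
 * side at iStart per the convention above, and the two partial lengths are
 * summed.
 */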

/*-*************************************
*  Hashes
***************************************/
static const U32 prime3bytes = 506832829U;
static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32 - 24)) * prime3bytes) >> (32 - h); }
ZSTD_STATIC size_t ZSTD_hash3Ptr(const void *ptr, U32 h) { return ZSTD_hash3(ZSTD_readLE32(ptr), h); } /* only in zstd_opt.h */

static const U32 prime4bytes = 2654435761U;
static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32 - h); }
static size_t ZSTD_hash4Ptr(const void *ptr, U32 h) { return ZSTD_hash4(ZSTD_read32(ptr), h); }

static const U64 prime5bytes = 889523592379ULL;
static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64 - 40)) * prime5bytes) >> (64 - h)); }
static size_t ZSTD_hash5Ptr(const void *p, U32 h) { return ZSTD_hash5(ZSTD_readLE64(p), h); }

static const U64 prime6bytes = 227718039650203ULL;
static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64 - 48)) * prime6bytes) >> (64 - h)); }
static size_t ZSTD_hash6Ptr(const void *p, U32 h) { return ZSTD_hash6(ZSTD_readLE64(p), h); }

static const U64 prime7bytes = 58295818150454627ULL;
static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64 - 56)) * prime7bytes) >> (64 - h)); }
static size_t ZSTD_hash7Ptr(const void *p, U32 h) { return ZSTD_hash7(ZSTD_readLE64(p), h); }

static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u)*prime8bytes) >> (64 - h)); }
static size_t ZSTD_hash8Ptr(const void *p, U32 h) { return ZSTD_hash8(ZSTD_readLE64(p), h); }

static size_t ZSTD_hashPtr(const void *p, U32 hBits, U32 mls)
{
	switch (mls) {
	// case 3: return ZSTD_hash3Ptr(p, hBits);
	default:
	case 4: return ZSTD_hash4Ptr(p, hBits);
	case 5: return ZSTD_hash5Ptr(p, hBits);
	case 6: return ZSTD_hash6Ptr(p, hBits);
	case 7: return ZSTD_hash7Ptr(p, hBits);
	case 8: return ZSTD_hash8Ptr(p, hBits);
	}
}
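/*
 * Note (illustrative, not part of the original file): these are
 * multiplicative (Fibonacci-style) hashes. The `u << (64 - 8*n)` shift keeps
 * only the n input bytes of interest, the odd prime spreads them across the
 * word, and the final `>> (64 - h)` (or `>> (32 - h)`) keeps the top h bits
 * as the table index.
 */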

/*-*************************************
*  Fast Scan
***************************************/
static void ZSTD_fillHashTable(ZSTD_CCtx *zc, const void *end, const U32 mls)
{
	U32 *const hashTable = zc->hashTable;
	U32 const hBits = zc->params.cParams.hashLog;
	const BYTE *const base = zc->base;
	const BYTE *ip = base + zc->nextToUpdate;
	const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
	const size_t fastHashFillStep = 3;

	while (ip <= iend) {
		hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
		ip += fastHashFillStep;
	}
}

FORCE_INLINE
void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
{
	U32 *const hashTable = cctx->hashTable;
	U32 const hBits = cctx->params.cParams.hashLog;
	seqStore_t *seqStorePtr = &(cctx->seqStore);
	const BYTE *const base = cctx->base;
	const BYTE *const istart = (const BYTE *)src;
	const BYTE *ip = istart;
	const BYTE *anchor = istart;
	const U32 lowestIndex = cctx->dictLimit;
	const BYTE *const lowest = base + lowestIndex;
	const BYTE *const iend = istart + srcSize;
	const BYTE *const ilimit = iend - HASH_READ_SIZE;
	U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
	U32 offsetSaved = 0;

	/* init */
	ip += (ip == lowest);
	{
		U32 const maxRep = (U32)(ip - lowest);
		if (offset_2 > maxRep)
			offsetSaved = offset_2, offset_2 = 0;
		if (offset_1 > maxRep)
			offsetSaved = offset_1, offset_1 = 0;
	}

	/* Main Search Loop */
	while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
		size_t mLength;
		size_t const h = ZSTD_hashPtr(ip, hBits, mls);
		U32 const curr = (U32)(ip - base);
		U32 const matchIndex = hashTable[h];
		const BYTE *match = base + matchIndex;
		hashTable[h] = curr; /* update hash table */

		if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
			mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
			ip++;
			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
		} else {
			U32 offset;
			if ((matchIndex <= lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
				ip += ((ip - anchor) >> g_searchStrength) + 1;
				continue;
			}
			mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
			offset = (U32)(ip - match);
			while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
				ip--;
				match--;
				mLength++;
			} /* catch up */
			offset_2 = offset_1;
			offset_1 = offset;

			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
		}

		/* match found */
		ip += mLength;
		anchor = ip;

		if (ip <= ilimit) {
			/* Fill Table */
			hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; /* here because curr+2 could be > iend-8 */
			hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
			/* check immediate repcode */
			while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
				/* store sequence */
				size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
				{
					U32 const tmpOff = offset_2;
					offset_2 = offset_1;
					offset_1 = tmpOff;
				} /* swap offset_2 <=> offset_1 */
				hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
				ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
				ip += rLength;
				anchor = ip;
				continue; /* faster when present ... (?) */
			}
		}
	}

	/* save reps for next block */
	cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
	cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;

	/* Last Literals */
	{
		size_t const lastLLSize = iend - anchor;
		memcpy(seqStorePtr->lit, anchor, lastLLSize);
		seqStorePtr->lit += lastLLSize;
	}
}
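/*
 * Note (illustrative, not part of the original file): offsets are saved to
 * repToConfirm[] rather than rep[]; ZSTD_compressSequences() copies them into
 * rep[] only if the compressed block is actually kept, so falling back to a
 * raw block leaves the repcode history untouched.
 */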
1103 
ZSTD_compressBlock_fast(ZSTD_CCtx * ctx,const void * src,size_t srcSize)1104 static void ZSTD_compressBlock_fast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
1105 {
1106 	const U32 mls = ctx->params.cParams.searchLength;
1107 	switch (mls) {
1108 	default: /* includes case 3 */
1109 	case 4: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return;
1110 	case 5: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return;
1111 	case 6: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return;
1112 	case 7: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return;
1113 	}
1114 }
1115 
ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx * ctx,const void * src,size_t srcSize,const U32 mls)1116 static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
1117 {
1118 	U32 *hashTable = ctx->hashTable;
1119 	const U32 hBits = ctx->params.cParams.hashLog;
1120 	seqStore_t *seqStorePtr = &(ctx->seqStore);
1121 	const BYTE *const base = ctx->base;
1122 	const BYTE *const dictBase = ctx->dictBase;
1123 	const BYTE *const istart = (const BYTE *)src;
1124 	const BYTE *ip = istart;
1125 	const BYTE *anchor = istart;
1126 	const U32 lowestIndex = ctx->lowLimit;
1127 	const BYTE *const dictStart = dictBase + lowestIndex;
1128 	const U32 dictLimit = ctx->dictLimit;
1129 	const BYTE *const lowPrefixPtr = base + dictLimit;
1130 	const BYTE *const dictEnd = dictBase + dictLimit;
1131 	const BYTE *const iend = istart + srcSize;
1132 	const BYTE *const ilimit = iend - 8;
1133 	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
1134 
1135 	/* Search Loop */
1136 	while (ip < ilimit) { /* < instead of <=, because (ip+1) */
1137 		const size_t h = ZSTD_hashPtr(ip, hBits, mls);
1138 		const U32 matchIndex = hashTable[h];
1139 		const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
1140 		const BYTE *match = matchBase + matchIndex;
1141 		const U32 curr = (U32)(ip - base);
1142 		const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
1143 		const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
1144 		const BYTE *repMatch = repBase + repIndex;
1145 		size_t mLength;
1146 		hashTable[h] = curr; /* update hash table */
1147 
1148 		if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
1149 		    (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
1150 			const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
1151 			mLength = ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
1152 			ip++;
1153 			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
1154 		} else {
1155 			if ((matchIndex < lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
1156 				ip += ((ip - anchor) >> g_searchStrength) + 1;
1157 				continue;
1158 			}
1159 			{
1160 				const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
1161 				const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
1162 				U32 offset;
1163 				mLength = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
1164 				while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
1165 					ip--;
1166 					match--;
1167 					mLength++;
1168 				} /* catch up */
1169 				offset = curr - matchIndex;
1170 				offset_2 = offset_1;
1171 				offset_1 = offset;
1172 				ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
1173 			}
1174 		}
1175 
1176 		/* found a match : store it */
1177 		ip += mLength;
1178 		anchor = ip;
1179 
1180 		if (ip <= ilimit) {
1181 			/* Fill Table */
1182 			hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2;
1183 			hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
1184 			/* check immediate repcode */
1185 			while (ip <= ilimit) {
1186 				U32 const curr2 = (U32)(ip - base);
1187 				U32 const repIndex2 = curr2 - offset_2;
1188 				const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
1189 				if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
1190 				    && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
1191 					const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
1192 					size_t repLength2 =
1193 					    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
1194 					U32 tmpOffset = offset_2;
1195 					offset_2 = offset_1;
1196 					offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
1197 					ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
1198 					hashTable[ZSTD_hashPtr(ip, hBits, mls)] = curr2;
1199 					ip += repLength2;
1200 					anchor = ip;
1201 					continue;
1202 				}
1203 				break;
1204 			}
1205 		}
1206 	}
1207 
1208 	/* save reps for next block */
1209 	ctx->repToConfirm[0] = offset_1;
1210 	ctx->repToConfirm[1] = offset_2;
1211 
1212 	/* Last Literals */
1213 	{
1214 		size_t const lastLLSize = iend - anchor;
1215 		memcpy(seqStorePtr->lit, anchor, lastLLSize);
1216 		seqStorePtr->lit += lastLLSize;
1217 	}
1218 }
1219 
ZSTD_compressBlock_fast_extDict(ZSTD_CCtx * ctx,const void * src,size_t srcSize)1220 static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
1221 {
1222 	U32 const mls = ctx->params.cParams.searchLength;
1223 	switch (mls) {
1224 	default: /* includes case 3 */
1225 	case 4: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return;
1226 	case 5: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return;
1227 	case 6: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return;
1228 	case 7: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return;
1229 	}
1230 }
1231 
1232 /*-*************************************
1233 *  Double Fast
1234 ***************************************/
ZSTD_fillDoubleHashTable(ZSTD_CCtx * cctx,const void * end,const U32 mls)1235 static void ZSTD_fillDoubleHashTable(ZSTD_CCtx *cctx, const void *end, const U32 mls)
1236 {
1237 	U32 *const hashLarge = cctx->hashTable;
1238 	U32 const hBitsL = cctx->params.cParams.hashLog;
1239 	U32 *const hashSmall = cctx->chainTable;
1240 	U32 const hBitsS = cctx->params.cParams.chainLog;
1241 	const BYTE *const base = cctx->base;
1242 	const BYTE *ip = base + cctx->nextToUpdate;
1243 	const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
1244 	const size_t fastHashFillStep = 3;
1245 
1246 	while (ip <= iend) {
1247 		hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
1248 		hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
1249 		ip += fastHashFillStep;
1250 	}
1251 }
1252 
1253 FORCE_INLINE
ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx * cctx,const void * src,size_t srcSize,const U32 mls)1254 void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
1255 {
1256 	U32 *const hashLong = cctx->hashTable;
1257 	const U32 hBitsL = cctx->params.cParams.hashLog;
1258 	U32 *const hashSmall = cctx->chainTable;
1259 	const U32 hBitsS = cctx->params.cParams.chainLog;
1260 	seqStore_t *seqStorePtr = &(cctx->seqStore);
1261 	const BYTE *const base = cctx->base;
1262 	const BYTE *const istart = (const BYTE *)src;
1263 	const BYTE *ip = istart;
1264 	const BYTE *anchor = istart;
1265 	const U32 lowestIndex = cctx->dictLimit;
1266 	const BYTE *const lowest = base + lowestIndex;
1267 	const BYTE *const iend = istart + srcSize;
1268 	const BYTE *const ilimit = iend - HASH_READ_SIZE;
1269 	U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
1270 	U32 offsetSaved = 0;
1271 
1272 	/* init */
1273 	ip += (ip == lowest);
1274 	{
1275 		U32 const maxRep = (U32)(ip - lowest);
1276 		if (offset_2 > maxRep)
1277 			offsetSaved = offset_2, offset_2 = 0;
1278 		if (offset_1 > maxRep)
1279 			offsetSaved = offset_1, offset_1 = 0;
1280 	}
1281 
1282 	/* Main Search Loop */
1283 	while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
1284 		size_t mLength;
1285 		size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
1286 		size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
1287 		U32 const curr = (U32)(ip - base);
1288 		U32 const matchIndexL = hashLong[h2];
1289 		U32 const matchIndexS = hashSmall[h];
1290 		const BYTE *matchLong = base + matchIndexL;
1291 		const BYTE *match = base + matchIndexS;
1292 		hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
1293 
1294 		if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { /* note : by construction, offset_1 <= curr */
1295 			mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
1296 			ip++;
1297 			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
1298 		} else {
1299 			U32 offset;
1300 			if ((matchIndexL > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
1301 				mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8;
1302 				offset = (U32)(ip - matchLong);
1303 				while (((ip > anchor) & (matchLong > lowest)) && (ip[-1] == matchLong[-1])) {
1304 					ip--;
1305 					matchLong--;
1306 					mLength++;
1307 				} /* catch up */
1308 			} else if ((matchIndexS > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
1309 				size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
1310 				U32 const matchIndex3 = hashLong[h3];
1311 				const BYTE *match3 = base + matchIndex3;
1312 				hashLong[h3] = curr + 1;
1313 				if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
1314 					mLength = ZSTD_count(ip + 9, match3 + 8, iend) + 8;
1315 					ip++;
1316 					offset = (U32)(ip - match3);
1317 					while (((ip > anchor) & (match3 > lowest)) && (ip[-1] == match3[-1])) {
1318 						ip--;
1319 						match3--;
1320 						mLength++;
1321 					} /* catch up */
1322 				} else {
1323 					mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
1324 					offset = (U32)(ip - match);
1325 					while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
1326 						ip--;
1327 						match--;
1328 						mLength++;
1329 					} /* catch up */
1330 				}
1331 			} else {
1332 				ip += ((ip - anchor) >> g_searchStrength) + 1;
1333 				continue;
1334 			}
1335 
1336 			offset_2 = offset_1;
1337 			offset_1 = offset;
1338 
1339 			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
1340 		}
1341 
1342 		/* match found */
1343 		ip += mLength;
1344 		anchor = ip;
1345 
1346 		if (ip <= ilimit) {
1347 			/* Fill Table */
1348 			hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] =
1349 			    curr + 2; /* here because curr+2 could be > iend-8 */
1350 			hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
1351 
1352 			/* check immediate repcode */
1353 			while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
1354 				/* store sequence */
1355 				size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
1356 				{
1357 					U32 const tmpOff = offset_2;
1358 					offset_2 = offset_1;
1359 					offset_1 = tmpOff;
1360 				} /* swap offset_2 <=> offset_1 */
1361 				hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
1362 				hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
1363 				ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
1364 				ip += rLength;
1365 				anchor = ip;
1366 				continue; /* faster when present ... (?) */
1367 			}
1368 		}
1369 	}
1370 
1371 	/* save reps for next block */
1372 	cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
1373 	cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
1374 
1375 	/* Last Literals */
1376 	{
1377 		size_t const lastLLSize = iend - anchor;
1378 		memcpy(seqStorePtr->lit, anchor, lastLLSize);
1379 		seqStorePtr->lit += lastLLSize;
1380 	}
1381 }
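/*
 * Editor's note: search priority in the loop above, for each position ip:
 *   1. repcode at ip+1 against offset_1 (cheapest test);
 *   2. 8-byte match via the long hash table (hashLong);
 *   3. 4-byte match via the short table (hashSmall), immediately re-probing
 *      the long table at ip+1 (h3) in case a longer match starts one byte
 *      later;
 *   4. nothing found: skip ahead by ((ip - anchor) >> g_searchStrength) + 1,
 *      which accelerates over incompressible regions.
 */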
1382 
1383 static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
1384 {
1385 	const U32 mls = ctx->params.cParams.searchLength;
1386 	switch (mls) {
1387 	default: /* includes case 3 */
1388 	case 4: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return;
1389 	case 5: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return;
1390 	case 6: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return;
1391 	case 7: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return;
1392 	}
1393 }
1394 
1395 static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
1396 {
1397 	U32 *const hashLong = ctx->hashTable;
1398 	U32 const hBitsL = ctx->params.cParams.hashLog;
1399 	U32 *const hashSmall = ctx->chainTable;
1400 	U32 const hBitsS = ctx->params.cParams.chainLog;
1401 	seqStore_t *seqStorePtr = &(ctx->seqStore);
1402 	const BYTE *const base = ctx->base;
1403 	const BYTE *const dictBase = ctx->dictBase;
1404 	const BYTE *const istart = (const BYTE *)src;
1405 	const BYTE *ip = istart;
1406 	const BYTE *anchor = istart;
1407 	const U32 lowestIndex = ctx->lowLimit;
1408 	const BYTE *const dictStart = dictBase + lowestIndex;
1409 	const U32 dictLimit = ctx->dictLimit;
1410 	const BYTE *const lowPrefixPtr = base + dictLimit;
1411 	const BYTE *const dictEnd = dictBase + dictLimit;
1412 	const BYTE *const iend = istart + srcSize;
1413 	const BYTE *const ilimit = iend - 8;
1414 	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
1415 
1416 	/* Search Loop */
1417 	while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
1418 		const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
1419 		const U32 matchIndex = hashSmall[hSmall];
1420 		const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
1421 		const BYTE *match = matchBase + matchIndex;
1422 
1423 		const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
1424 		const U32 matchLongIndex = hashLong[hLong];
1425 		const BYTE *matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
1426 		const BYTE *matchLong = matchLongBase + matchLongIndex;
1427 
1428 		const U32 curr = (U32)(ip - base);
1429 		const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr + 1 */
1430 		const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
1431 		const BYTE *repMatch = repBase + repIndex;
1432 		size_t mLength;
1433 		hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
1434 
1435 		if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
1436 		    (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
1437 			const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
1438 			mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, lowPrefixPtr) + 4;
1439 			ip++;
1440 			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
1441 		} else {
1442 			if ((matchLongIndex > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
1443 				const BYTE *matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
1444 				const BYTE *lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
1445 				U32 offset;
1446 				mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, lowPrefixPtr) + 8;
1447 				offset = curr - matchLongIndex;
1448 				while (((ip > anchor) & (matchLong > lowMatchPtr)) && (ip[-1] == matchLong[-1])) {
1449 					ip--;
1450 					matchLong--;
1451 					mLength++;
1452 				} /* catch up */
1453 				offset_2 = offset_1;
1454 				offset_1 = offset;
1455 				ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
1456 
1457 			} else if ((matchIndex > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
1458 				size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
1459 				U32 const matchIndex3 = hashLong[h3];
1460 				const BYTE *const match3Base = matchIndex3 < dictLimit ? dictBase : base;
1461 				const BYTE *match3 = match3Base + matchIndex3;
1462 				U32 offset;
1463 				hashLong[h3] = curr + 1;
1464 				if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
1465 					const BYTE *matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
1466 					const BYTE *lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
1467 					mLength = ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, lowPrefixPtr) + 8;
1468 					ip++;
1469 					offset = curr + 1 - matchIndex3;
1470 					while (((ip > anchor) & (match3 > lowMatchPtr)) && (ip[-1] == match3[-1])) {
1471 						ip--;
1472 						match3--;
1473 						mLength++;
1474 					} /* catch up */
1475 				} else {
1476 					const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
1477 					const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
1478 					mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, lowPrefixPtr) + 4;
1479 					offset = curr - matchIndex;
1480 					while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
1481 						ip--;
1482 						match--;
1483 						mLength++;
1484 					} /* catch up */
1485 				}
1486 				offset_2 = offset_1;
1487 				offset_1 = offset;
1488 				ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
1489 
1490 			} else {
1491 				ip += ((ip - anchor) >> g_searchStrength) + 1;
1492 				continue;
1493 			}
1494 		}
1495 
1496 		/* found a match : store it */
1497 		ip += mLength;
1498 		anchor = ip;
1499 
1500 		if (ip <= ilimit) {
1501 			/* Fill Table */
1502 			hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = curr + 2;
1503 			hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = curr + 2;
1504 			hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
1505 			hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (U32)(ip - 2 - base);
1506 			/* check immediate repcode */
1507 			while (ip <= ilimit) {
1508 				U32 const curr2 = (U32)(ip - base);
1509 				U32 const repIndex2 = curr2 - offset_2;
1510 				const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
1511 				if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
1512 				    && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
1513 					const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
1514 					size_t const repLength2 =
1515 					    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
1516 					U32 tmpOffset = offset_2;
1517 					offset_2 = offset_1;
1518 					offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
1519 					ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
1520 					hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = curr2;
1521 					hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = curr2;
1522 					ip += repLength2;
1523 					anchor = ip;
1524 					continue;
1525 				}
1526 				break;
1527 			}
1528 		}
1529 	}
1530 
1531 	/* save reps for next block */
1532 	ctx->repToConfirm[0] = offset_1;
1533 	ctx->repToConfirm[1] = offset_2;
1534 
1535 	/* Last Literals */
1536 	{
1537 		size_t const lastLLSize = iend - anchor;
1538 		memcpy(seqStorePtr->lit, anchor, lastLLSize);
1539 		seqStorePtr->lit += lastLLSize;
1540 	}
1541 }
1542 
1543 static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
1544 {
1545 	U32 const mls = ctx->params.cParams.searchLength;
1546 	switch (mls) {
1547 	default: /* includes case 3 */
1548 	case 4: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return;
1549 	case 5: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return;
1550 	case 6: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return;
1551 	case 7: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return;
1552 	}
1553 }
1554 
1555 /*-*************************************
1556 *  Binary Tree search
1557 ***************************************/
1558 /** ZSTD_insertBt1() : add one or multiple positions to the tree.
1559 *   ip : assumed <= iend-8.
1560 *   @return : nb of positions added */
1561 static U32 ZSTD_insertBt1(ZSTD_CCtx *zc, const BYTE *const ip, const U32 mls, const BYTE *const iend, U32 nbCompares, U32 extDict)
1562 {
1563 	U32 *const hashTable = zc->hashTable;
1564 	U32 const hashLog = zc->params.cParams.hashLog;
1565 	size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
1566 	U32 *const bt = zc->chainTable;
1567 	U32 const btLog = zc->params.cParams.chainLog - 1;
1568 	U32 const btMask = (1 << btLog) - 1;
1569 	U32 matchIndex = hashTable[h];
1570 	size_t commonLengthSmaller = 0, commonLengthLarger = 0;
1571 	const BYTE *const base = zc->base;
1572 	const BYTE *const dictBase = zc->dictBase;
1573 	const U32 dictLimit = zc->dictLimit;
1574 	const BYTE *const dictEnd = dictBase + dictLimit;
1575 	const BYTE *const prefixStart = base + dictLimit;
1576 	const BYTE *match;
1577 	const U32 curr = (U32)(ip - base);
1578 	const U32 btLow = btMask >= curr ? 0 : curr - btMask;
1579 	U32 *smallerPtr = bt + 2 * (curr & btMask);
1580 	U32 *largerPtr = smallerPtr + 1;
1581 	U32 dummy32; /* to be nullified at the end */
1582 	U32 const windowLow = zc->lowLimit;
1583 	U32 matchEndIdx = curr + 8;
1584 	size_t bestLength = 8;
1585 
1586 	hashTable[h] = curr; /* Update Hash Table */
1587 
1588 	while (nbCompares-- && (matchIndex > windowLow)) {
1589 		U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
1590 		size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
1591 
1592 		if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
1593 			match = base + matchIndex;
1594 			if (match[matchLength] == ip[matchLength])
1595 				matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
1596 		} else {
1597 			match = dictBase + matchIndex;
1598 			matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
1599 			if (matchIndex + matchLength >= dictLimit)
1600 				match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
1601 		}
1602 
1603 		if (matchLength > bestLength) {
1604 			bestLength = matchLength;
1605 			if (matchLength > matchEndIdx - matchIndex)
1606 				matchEndIdx = matchIndex + (U32)matchLength;
1607 		}
1608 
1609 		if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
1610 			break;		      /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
1611 
1612 		if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */
1613 			/* match is smaller than curr */
1614 			*smallerPtr = matchIndex;	  /* update smaller idx */
1615 			commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
1616 			if (matchIndex <= btLow) {
1617 				smallerPtr = &dummy32;
1618 				break;
1619 			}			  /* beyond tree size, stop the search */
1620 			smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
1621 			matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
1622 		} else {
1623 			/* match is larger than curr */
1624 			*largerPtr = matchIndex;
1625 			commonLengthLarger = matchLength;
1626 			if (matchIndex <= btLow) {
1627 				largerPtr = &dummy32;
1628 				break;
1629 			} /* beyond tree size, stop the search */
1630 			largerPtr = nextPtr;
1631 			matchIndex = nextPtr[0];
1632 		}
1633 	}
1634 
1635 	*smallerPtr = *largerPtr = 0;
1636 	if (bestLength > 384)
1637 		return MIN(192, (U32)(bestLength - 384)); /* speed optimization */
1638 	if (matchEndIdx > curr + 8)
1639 		return matchEndIdx - curr - 8;
1640 	return 1;
1641 }
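/*
 * Editor's note: the binary tree is laid out inside chainTable, two U32
 * slots per position: bt[2*(idx & btMask)] points at the subtree of suffixes
 * that compare smaller, bt[2*(idx & btMask) + 1] at those that compare
 * larger. Worked example (assumed btLog = 2, so btMask = 3): position 9
 * maps to node 9 & 3 = 1, i.e. its children live in bt[2] and bt[3];
 * positions older than curr - btMask fall outside the tree and terminate
 * the descent.
 */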
1642 
1643 static size_t ZSTD_insertBtAndFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, size_t *offsetPtr, U32 nbCompares, const U32 mls,
1644 					    U32 extDict)
1645 {
1646 	U32 *const hashTable = zc->hashTable;
1647 	U32 const hashLog = zc->params.cParams.hashLog;
1648 	size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
1649 	U32 *const bt = zc->chainTable;
1650 	U32 const btLog = zc->params.cParams.chainLog - 1;
1651 	U32 const btMask = (1 << btLog) - 1;
1652 	U32 matchIndex = hashTable[h];
1653 	size_t commonLengthSmaller = 0, commonLengthLarger = 0;
1654 	const BYTE *const base = zc->base;
1655 	const BYTE *const dictBase = zc->dictBase;
1656 	const U32 dictLimit = zc->dictLimit;
1657 	const BYTE *const dictEnd = dictBase + dictLimit;
1658 	const BYTE *const prefixStart = base + dictLimit;
1659 	const U32 curr = (U32)(ip - base);
1660 	const U32 btLow = btMask >= curr ? 0 : curr - btMask;
1661 	const U32 windowLow = zc->lowLimit;
1662 	U32 *smallerPtr = bt + 2 * (curr & btMask);
1663 	U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
1664 	U32 matchEndIdx = curr + 8;
1665 	U32 dummy32; /* to be nullified at the end */
1666 	size_t bestLength = 0;
1667 
1668 	hashTable[h] = curr; /* Update Hash Table */
1669 
1670 	while (nbCompares-- && (matchIndex > windowLow)) {
1671 		U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
1672 		size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
1673 		const BYTE *match;
1674 
1675 		if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
1676 			match = base + matchIndex;
1677 			if (match[matchLength] == ip[matchLength])
1678 				matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
1679 		} else {
1680 			match = dictBase + matchIndex;
1681 			matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
1682 			if (matchIndex + matchLength >= dictLimit)
1683 				match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
1684 		}
1685 
1686 		if (matchLength > bestLength) {
1687 			if (matchLength > matchEndIdx - matchIndex)
1688 				matchEndIdx = matchIndex + (U32)matchLength;
1689 			if ((4 * (int)(matchLength - bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)offsetPtr[0] + 1)))
1690 				bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
1691 			if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
1692 				break;		      /* drop, to guarantee consistency (miss a little bit of compression) */
1693 		}
1694 
1695 		if (match[matchLength] < ip[matchLength]) {
1696 			/* match is smaller than curr */
1697 			*smallerPtr = matchIndex;	  /* update smaller idx */
1698 			commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
1699 			if (matchIndex <= btLow) {
1700 				smallerPtr = &dummy32;
1701 				break;
1702 			}			  /* beyond tree size, stop the search */
1703 			smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
1704 			matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
1705 		} else {
1706 			/* match is larger than curr */
1707 			*largerPtr = matchIndex;
1708 			commonLengthLarger = matchLength;
1709 			if (matchIndex <= btLow) {
1710 				largerPtr = &dummy32;
1711 				break;
1712 			} /* beyond tree size, stop the search */
1713 			largerPtr = nextPtr;
1714 			matchIndex = nextPtr[0];
1715 		}
1716 	}
1717 
1718 	*smallerPtr = *largerPtr = 0;
1719 
1720 	zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
1721 	return bestLength;
1722 }
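/*
 * Editor's note: the acceptance test above weighs extra match length against
 * offset cost: a candidate replaces the current best only if
 *     4 * (matchLength - bestLength)
 *         > highbit(curr - matchIndex + 1) - highbit(offsetPtr[0] + 1)
 * i.e. each extra byte is worth 4 units, while each doubling of the offset
 * costs about one unit (one more bit to encode). Worked example, ignoring
 * ZSTD_REP_MOVE for simplicity: upgrading a length-8 match at stored offset
 * 100 to a length-9 match at distance 60000 gives 4*1 = 4 on the left and
 * highbit(60001) - highbit(101) = 15 - 6 = 9 on the right, so the shorter
 * but much closer match is kept.
 */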
1723 
1724 static void ZSTD_updateTree(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
1725 {
1726 	const BYTE *const base = zc->base;
1727 	const U32 target = (U32)(ip - base);
1728 	U32 idx = zc->nextToUpdate;
1729 
1730 	while (idx < target)
1731 		idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 0);
1732 }
1733 
1734 /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
1735 static size_t ZSTD_BtFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls)
1736 {
1737 	if (ip < zc->base + zc->nextToUpdate)
1738 		return 0; /* skipped area */
1739 	ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
1740 	return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
1741 }
1742 
1743 static size_t ZSTD_BtFindBestMatch_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
1744 					     const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 matchLengthSearch)
1745 {
1746 	switch (matchLengthSearch) {
1747 	default: /* includes case 3 */
1748 	case 4: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
1749 	case 5: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
1750 	case 7:
1751 	case 6: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
1752 	}
1753 }
1754 
1755 static void ZSTD_updateTree_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
1756 {
1757 	const BYTE *const base = zc->base;
1758 	const U32 target = (U32)(ip - base);
1759 	U32 idx = zc->nextToUpdate;
1760 
1761 	while (idx < target)
1762 		idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 1);
1763 }
1764 
1765 /** Tree updater, providing best match */
1766 static size_t ZSTD_BtFindBestMatch_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
1767 					   const U32 mls)
1768 {
1769 	if (ip < zc->base + zc->nextToUpdate)
1770 		return 0; /* skipped area */
1771 	ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
1772 	return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
1773 }
1774 
1775 static size_t ZSTD_BtFindBestMatch_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
1776 						     const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
1777 						     const U32 matchLengthSearch)
1778 {
1779 	switch (matchLengthSearch) {
1780 	default: /* includes case 3 */
1781 	case 4: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
1782 	case 5: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
1783 	case 7:
1784 	case 6: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
1785 	}
1786 }
1787 
1788 /* *********************************
1789 *  Hash Chain
1790 ***********************************/
1791 #define NEXT_IN_CHAIN(d, mask) chainTable[(d)&mask]
1792 
1793 /* Update chains up to ip (excluded)
1794    Assumption : always within prefix (i.e. not within extDict) */
1795 FORCE_INLINE
1796 U32 ZSTD_insertAndFindFirstIndex(ZSTD_CCtx *zc, const BYTE *ip, U32 mls)
1797 {
1798 	U32 *const hashTable = zc->hashTable;
1799 	const U32 hashLog = zc->params.cParams.hashLog;
1800 	U32 *const chainTable = zc->chainTable;
1801 	const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1;
1802 	const BYTE *const base = zc->base;
1803 	const U32 target = (U32)(ip - base);
1804 	U32 idx = zc->nextToUpdate;
1805 
1806 	while (idx < target) { /* catch up */
1807 		size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls);
1808 		NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
1809 		hashTable[h] = idx;
1810 		idx++;
1811 	}
1812 
1813 	zc->nextToUpdate = target;
1814 	return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
1815 }
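/*
 * Editor's note: chainTable behaves as a rolling set of singly-linked lists.
 * For each new position idx, the previous head of its hash bucket is saved
 * in chainTable[idx & chainMask] and idx becomes the new head, so walking
 * NEXT_IN_CHAIN() from hashTable[h] visits earlier positions with the same
 * hash, most recent first, until the chain leaves the (1 << chainLog) window
 * or the attempt budget runs out.
 */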
1816 
1817 /* inlining is important to hardwire a hot branch (template emulation) */
1818 FORCE_INLINE
1819 size_t ZSTD_HcFindBestMatch_generic(ZSTD_CCtx *zc, /* Index table will be updated */
1820 				    const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls,
1821 				    const U32 extDict)
1822 {
1823 	U32 *const chainTable = zc->chainTable;
1824 	const U32 chainSize = (1 << zc->params.cParams.chainLog);
1825 	const U32 chainMask = chainSize - 1;
1826 	const BYTE *const base = zc->base;
1827 	const BYTE *const dictBase = zc->dictBase;
1828 	const U32 dictLimit = zc->dictLimit;
1829 	const BYTE *const prefixStart = base + dictLimit;
1830 	const BYTE *const dictEnd = dictBase + dictLimit;
1831 	const U32 lowLimit = zc->lowLimit;
1832 	const U32 curr = (U32)(ip - base);
1833 	const U32 minChain = curr > chainSize ? curr - chainSize : 0;
1834 	int nbAttempts = maxNbAttempts;
1835 	size_t ml = EQUAL_READ32 - 1;
1836 
1837 	/* HC4 match finder */
1838 	U32 matchIndex = ZSTD_insertAndFindFirstIndex(zc, ip, mls);
1839 
1840 	for (; (matchIndex > lowLimit) & (nbAttempts > 0); nbAttempts--) {
1841 		const BYTE *match;
1842 		size_t currMl = 0;
1843 		if ((!extDict) || matchIndex >= dictLimit) {
1844 			match = base + matchIndex;
1845 			if (match[ml] == ip[ml]) /* potentially better */
1846 				currMl = ZSTD_count(ip, match, iLimit);
1847 		} else {
1848 			match = dictBase + matchIndex;
1849 			if (ZSTD_read32(match) == ZSTD_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
1850 				currMl = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
1851 		}
1852 
1853 		/* save best solution */
1854 		if (currMl > ml) {
1855 			ml = currMl;
1856 			*offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
1857 			if (ip + currMl == iLimit)
1858 				break; /* best possible, and avoid read overflow*/
1859 		}
1860 
1861 		if (matchIndex <= minChain)
1862 			break;
1863 		matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
1864 	}
1865 
1866 	return ml;
1867 }
1868 
1869 FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
1870 						   const U32 matchLengthSearch)
1871 {
1872 	switch (matchLengthSearch) {
1873 	default: /* includes case 3 */
1874 	case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
1875 	case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
1876 	case 7:
1877 	case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
1878 	}
1879 }
1880 
1881 FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
1882 							   const U32 matchLengthSearch)
1883 {
1884 	switch (matchLengthSearch) {
1885 	default: /* includes case 3 */
1886 	case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
1887 	case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
1888 	case 7:
1889 	case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
1890 	}
1891 }
1892 
1893 /* *******************************
1894 *  Common parser - lazy strategy
1895 *********************************/
1896 FORCE_INLINE
1897 void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
1898 {
1899 	seqStore_t *seqStorePtr = &(ctx->seqStore);
1900 	const BYTE *const istart = (const BYTE *)src;
1901 	const BYTE *ip = istart;
1902 	const BYTE *anchor = istart;
1903 	const BYTE *const iend = istart + srcSize;
1904 	const BYTE *const ilimit = iend - 8;
1905 	const BYTE *const base = ctx->base + ctx->dictLimit;
1906 
1907 	U32 const maxSearches = 1 << ctx->params.cParams.searchLog;
1908 	U32 const mls = ctx->params.cParams.searchLength;
1909 
1910 	typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
1911 	searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
1912 	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset = 0;
1913 
1914 	/* init */
1915 	ip += (ip == base);
1916 	ctx->nextToUpdate3 = ctx->nextToUpdate;
1917 	{
1918 		U32 const maxRep = (U32)(ip - base);
1919 		if (offset_2 > maxRep)
1920 			savedOffset = offset_2, offset_2 = 0;
1921 		if (offset_1 > maxRep)
1922 			savedOffset = offset_1, offset_1 = 0;
1923 	}
1924 
1925 	/* Match Loop */
1926 	while (ip < ilimit) {
1927 		size_t matchLength = 0;
1928 		size_t offset = 0;
1929 		const BYTE *start = ip + 1;
1930 
1931 		/* check repCode */
1932 		if ((offset_1 > 0) & (ZSTD_read32(ip + 1) == ZSTD_read32(ip + 1 - offset_1))) {
1933 			/* repcode : we take it */
1934 			matchLength = ZSTD_count(ip + 1 + EQUAL_READ32, ip + 1 + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
1935 			if (depth == 0)
1936 				goto _storeSequence;
1937 		}
1938 
1939 		/* first search (depth 0) */
1940 		{
1941 			size_t offsetFound = 99999999;
1942 			size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
1943 			if (ml2 > matchLength)
1944 				matchLength = ml2, start = ip, offset = offsetFound;
1945 		}
1946 
1947 		if (matchLength < EQUAL_READ32) {
1948 			ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
1949 			continue;
1950 		}
1951 
1952 		/* let's try to find a better solution */
1953 		if (depth >= 1)
1954 			while (ip < ilimit) {
1955 				ip++;
1956 				if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
1957 					size_t const mlRep = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
1958 					int const gain2 = (int)(mlRep * 3);
1959 					int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
1960 					if ((mlRep >= EQUAL_READ32) && (gain2 > gain1))
1961 						matchLength = mlRep, offset = 0, start = ip;
1962 				}
1963 				{
1964 					size_t offset2 = 99999999;
1965 					size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
1966 					int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
1967 					int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
1968 					if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
1969 						matchLength = ml2, offset = offset2, start = ip;
1970 						continue; /* search a better one */
1971 					}
1972 				}
1973 
1974 				/* let's find an even better one */
1975 				if ((depth == 2) && (ip < ilimit)) {
1976 					ip++;
1977 					if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
1978 						size_t const ml2 = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
1979 						int const gain2 = (int)(ml2 * 4);
1980 						int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
1981 						if ((ml2 >= EQUAL_READ32) && (gain2 > gain1))
1982 							matchLength = ml2, offset = 0, start = ip;
1983 					}
1984 					{
1985 						size_t offset2 = 99999999;
1986 						size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
1987 						int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
1988 						int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
1989 						if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
1990 							matchLength = ml2, offset = offset2, start = ip;
1991 							continue;
1992 						}
1993 					}
1994 				}
1995 				break; /* nothing found : store previous solution */
1996 			}
1997 
1998 		/* NOTE:
1999 		 * start[-offset+ZSTD_REP_MOVE-1] would be undefined behavior:
2000 		 * (-offset+ZSTD_REP_MOVE-1) is unsigned, so adding it to start wraps
2001 		 * the pointer; hence the (start-offset+ZSTD_REP_MOVE) form used below.
2002 		 */
2003 		/* catch up */
2004 		if (offset) {
2005 			while ((start > anchor) && (start > base + offset - ZSTD_REP_MOVE) &&
2006 			       (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1])) /* only search for offset within prefix */
2007 			{
2008 				start--;
2009 				matchLength++;
2010 			}
2011 			offset_2 = offset_1;
2012 			offset_1 = (U32)(offset - ZSTD_REP_MOVE);
2013 		}
2014 
2015 	/* store sequence */
2016 _storeSequence:
2017 		{
2018 			size_t const litLength = start - anchor;
2019 			ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
2020 			anchor = ip = start + matchLength;
2021 		}
2022 
2023 		/* check immediate repcode */
2024 		while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
2025 			/* store sequence */
2026 			matchLength = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_2, iend) + EQUAL_READ32;
2027 			offset = offset_2;
2028 			offset_2 = offset_1;
2029 			offset_1 = (U32)offset; /* swap repcodes */
2030 			ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
2031 			ip += matchLength;
2032 			anchor = ip;
2033 			continue; /* faster when present ... (?) */
2034 		}
2035 	}
2036 
2037 	/* Save reps for next block */
2038 	ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
2039 	ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
2040 
2041 	/* Last Literals */
2042 	{
2043 		size_t const lastLLSize = iend - anchor;
2044 		memcpy(seqStorePtr->lit, anchor, lastLLSize);
2045 		seqStorePtr->lit += lastLLSize;
2046 	}
2047 }
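/*
 * Editor's note: a worked instance of the gain heuristic above. Suppose the
 * current best is matchLength = 6 at offset = 1000 and the search at ip+1
 * finds ml2 = 7 at offset2 = 64 (assumed values):
 *     gain1 = 6*4 - highbit(1001) + 4 = 24 - 9 + 4 = 19
 *     gain2 = 7*4 - highbit(65)       = 28 - 6     = 22
 * Since 22 > 19, the parser abandons the earlier match, adopts the new one,
 * and keeps probing one byte further; this deferral is the "lazy" in lazy
 * matching.
 */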
2048 
2049 static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); }
2050 
2051 static void ZSTD_compressBlock_lazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); }
2052 
2053 static void ZSTD_compressBlock_lazy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); }
2054 
2055 static void ZSTD_compressBlock_greedy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); }
2056 
2057 FORCE_INLINE
2058 void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
2059 {
2060 	seqStore_t *seqStorePtr = &(ctx->seqStore);
2061 	const BYTE *const istart = (const BYTE *)src;
2062 	const BYTE *ip = istart;
2063 	const BYTE *anchor = istart;
2064 	const BYTE *const iend = istart + srcSize;
2065 	const BYTE *const ilimit = iend - 8;
2066 	const BYTE *const base = ctx->base;
2067 	const U32 dictLimit = ctx->dictLimit;
2068 	const U32 lowestIndex = ctx->lowLimit;
2069 	const BYTE *const prefixStart = base + dictLimit;
2070 	const BYTE *const dictBase = ctx->dictBase;
2071 	const BYTE *const dictEnd = dictBase + dictLimit;
2072 	const BYTE *const dictStart = dictBase + ctx->lowLimit;
2073 
2074 	const U32 maxSearches = 1 << ctx->params.cParams.searchLog;
2075 	const U32 mls = ctx->params.cParams.searchLength;
2076 
2077 	typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
2078 	searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
2079 
2080 	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
2081 
2082 	/* init */
2083 	ctx->nextToUpdate3 = ctx->nextToUpdate;
2084 	ip += (ip == prefixStart);
2085 
2086 	/* Match Loop */
2087 	while (ip < ilimit) {
2088 		size_t matchLength = 0;
2089 		size_t offset = 0;
2090 		const BYTE *start = ip + 1;
2091 		U32 curr = (U32)(ip - base);
2092 
2093 		/* check repCode */
2094 		{
2095 			const U32 repIndex = (U32)(curr + 1 - offset_1);
2096 			const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
2097 			const BYTE *const repMatch = repBase + repIndex;
2098 			if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
2099 				if (ZSTD_read32(ip + 1) == ZSTD_read32(repMatch)) {
2100 					/* repcode detected, we should take it */
2101 					const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
2102 					matchLength =
2103 					    ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
2104 					if (depth == 0)
2105 						goto _storeSequence;
2106 				}
2107 		}
2108 
2109 		/* first search (depth 0) */
2110 		{
2111 			size_t offsetFound = 99999999;
2112 			size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
2113 			if (ml2 > matchLength)
2114 				matchLength = ml2, start = ip, offset = offsetFound;
2115 		}
2116 
2117 		if (matchLength < EQUAL_READ32) {
2118 			ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
2119 			continue;
2120 		}
2121 
2122 		/* let's try to find a better solution */
2123 		if (depth >= 1)
2124 			while (ip < ilimit) {
2125 				ip++;
2126 				curr++;
2127 				/* check repCode */
2128 				if (offset) {
2129 					const U32 repIndex = (U32)(curr - offset_1);
2130 					const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
2131 					const BYTE *const repMatch = repBase + repIndex;
2132 					if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
2133 						if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
2134 							/* repcode detected */
2135 							const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
2136 							size_t const repLength =
2137 							    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) +
2138 							    EQUAL_READ32;
2139 							int const gain2 = (int)(repLength * 3);
2140 							int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
2141 							if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
2142 								matchLength = repLength, offset = 0, start = ip;
2143 						}
2144 				}
2145 
2146 				/* search match, depth 1 */
2147 				{
2148 					size_t offset2 = 99999999;
2149 					size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
2150 					int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
2151 					int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
2152 					if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
2153 						matchLength = ml2, offset = offset2, start = ip;
2154 						continue; /* search a better one */
2155 					}
2156 				}
2157 
2158 				/* let's find an even better one */
2159 				if ((depth == 2) && (ip < ilimit)) {
2160 					ip++;
2161 					curr++;
2162 					/* check repCode */
2163 					if (offset) {
2164 						const U32 repIndex = (U32)(curr - offset_1);
2165 						const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
2166 						const BYTE *const repMatch = repBase + repIndex;
2167 						if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
2168 							if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
2169 								/* repcode detected */
2170 								const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
2171 								size_t repLength = ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend,
2172 													repEnd, prefixStart) +
2173 										   EQUAL_READ32;
2174 								int gain2 = (int)(repLength * 4);
2175 								int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
2176 								if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
2177 									matchLength = repLength, offset = 0, start = ip;
2178 							}
2179 					}
2180 
2181 					/* search match, depth 2 */
2182 					{
2183 						size_t offset2 = 99999999;
2184 						size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
2185 						int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
2186 						int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
2187 						if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
2188 							matchLength = ml2, offset = offset2, start = ip;
2189 							continue;
2190 						}
2191 					}
2192 				}
2193 				break; /* nothing found : store previous solution */
2194 			}
2195 
2196 		/* catch up */
2197 		if (offset) {
2198 			U32 const matchIndex = (U32)((start - base) - (offset - ZSTD_REP_MOVE));
2199 			const BYTE *match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
2200 			const BYTE *const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
2201 			while ((start > anchor) && (match > mStart) && (start[-1] == match[-1])) {
2202 				start--;
2203 				match--;
2204 				matchLength++;
2205 			} /* catch up */
2206 			offset_2 = offset_1;
2207 			offset_1 = (U32)(offset - ZSTD_REP_MOVE);
2208 		}
2209 
2210 	/* store sequence */
2211 	_storeSequence: {
2212 		size_t const litLength = start - anchor;
2213 		ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
2214 		anchor = ip = start + matchLength;
2215 	}
2216 
2217 		/* check immediate repcode */
2218 		while (ip <= ilimit) {
2219 			const U32 repIndex = (U32)((ip - base) - offset_2);
2220 			const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
2221 			const BYTE *const repMatch = repBase + repIndex;
2222 			if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
2223 				if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
2224 					/* repcode detected, we should take it */
2225 					const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
2226 					matchLength =
2227 					    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
2228 					offset = offset_2;
2229 					offset_2 = offset_1;
2230 					offset_1 = (U32)offset; /* swap offset history */
2231 					ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
2232 					ip += matchLength;
2233 					anchor = ip;
2234 					continue; /* faster when present ... (?) */
2235 				}
2236 			break;
2237 		}
2238 	}
2239 
2240 	/* Save reps for next block */
2241 	ctx->repToConfirm[0] = offset_1;
2242 	ctx->repToConfirm[1] = offset_2;
2243 
2244 	/* Last Literals */
2245 	{
2246 		size_t const lastLLSize = iend - anchor;
2247 		memcpy(seqStorePtr->lit, anchor, lastLLSize);
2248 		seqStorePtr->lit += lastLLSize;
2249 	}
2250 }
2251 
2252 void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); }
2253 
2254 static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
2255 {
2256 	ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
2257 }
2258 
2259 static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
2260 {
2261 	ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
2262 }
2263 
2264 static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
2265 {
2266 	ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
2267 }
2268 
2269 /* The optimal parser */
2270 #include "zstd_opt.h"
2271 
2272 static void ZSTD_compressBlock_btopt(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
2273 {
2274 #ifdef ZSTD_OPT_H_91842398743
2275 	ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
2276 #else
2277 	(void)ctx;
2278 	(void)src;
2279 	(void)srcSize;
2280 	return;
2281 #endif
2282 }
2283 
2284 static void ZSTD_compressBlock_btopt2(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
2285 {
2286 #ifdef ZSTD_OPT_H_91842398743
2287 	ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
2288 #else
2289 	(void)ctx;
2290 	(void)src;
2291 	(void)srcSize;
2292 	return;
2293 #endif
2294 }
2295 
2296 static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
2297 {
2298 #ifdef ZSTD_OPT_H_91842398743
2299 	ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
2300 #else
2301 	(void)ctx;
2302 	(void)src;
2303 	(void)srcSize;
2304 	return;
2305 #endif
2306 }
2307 
2308 static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
2309 {
2310 #ifdef ZSTD_OPT_H_91842398743
2311 	ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
2312 #else
2313 	(void)ctx;
2314 	(void)src;
2315 	(void)srcSize;
2316 	return;
2317 #endif
2318 }
2319 
2320 typedef void (*ZSTD_blockCompressor)(ZSTD_CCtx *ctx, const void *src, size_t srcSize);
2321 
2322 static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
2323 {
2324 	static const ZSTD_blockCompressor blockCompressor[2][8] = {
2325 	    {ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2,
2326 	     ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2},
2327 	    {ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict,
2328 	     ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict}};
2329 
2330 	return blockCompressor[extDict][(U32)strat];
2331 }
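/*
 * Editor's note: row 0 holds the in-prefix compressors, row 1 their extDict
 * variants; columns follow the ZSTD_strategy enum order, ZSTD_fast through
 * ZSTD_btopt2. Assuming that enum layout, for example,
 * ZSTD_selectBlockCompressor(ZSTD_lazy2, 1) yields
 * ZSTD_compressBlock_lazy2_extDict.
 */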
2332 
2333 static size_t ZSTD_compressBlock_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
2334 {
2335 	ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit);
2336 	const BYTE *const base = zc->base;
2337 	const BYTE *const istart = (const BYTE *)src;
2338 	const U32 curr = (U32)(istart - base);
2339 	if (srcSize < MIN_CBLOCK_SIZE + ZSTD_blockHeaderSize + 1)
2340 		return 0; /* don't even attempt compression below a certain srcSize */
2341 	ZSTD_resetSeqStore(&(zc->seqStore));
2342 	if (curr > zc->nextToUpdate + 384)
2343 		zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384)); /* the tree is not updated after very long rep matches; bound the catch-up work here */
2344 	blockCompressor(zc, src, srcSize);
2345 	return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
2346 }
2347 
2348 /*! ZSTD_compress_generic() :
2349 *   Compress a chunk of data into one or multiple blocks.
2350 *   All blocks will be terminated, all input will be consumed.
2351 *   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
2352 *   Frame is assumed to be already started (header already produced)
2353 *   @return : compressed size, or an error code
2354 */
2355 static size_t ZSTD_compress_generic(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastFrameChunk)
2356 {
2357 	size_t blockSize = cctx->blockSize;
2358 	size_t remaining = srcSize;
2359 	const BYTE *ip = (const BYTE *)src;
2360 	BYTE *const ostart = (BYTE *)dst;
2361 	BYTE *op = ostart;
2362 	U32 const maxDist = 1 << cctx->params.cParams.windowLog;
2363 
2364 	if (cctx->params.fParams.checksumFlag && srcSize)
2365 		xxh64_update(&cctx->xxhState, src, srcSize);
2366 
2367 	while (remaining) {
2368 		U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
2369 		size_t cSize;
2370 
2371 		if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
2372 			return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
2373 		if (remaining < blockSize)
2374 			blockSize = remaining;
2375 
2376 		/* preemptive overflow correction */
2377 		if (cctx->lowLimit > (3U << 29)) {
2378 			U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1;
2379 			U32 const curr = (U32)(ip - cctx->base);
2380 			U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog);
2381 			U32 const correction = curr - newCurr;
2382 			ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
2383 			ZSTD_reduceIndex(cctx, correction);
2384 			cctx->base += correction;
2385 			cctx->dictBase += correction;
2386 			cctx->lowLimit -= correction;
2387 			cctx->dictLimit -= correction;
2388 			if (cctx->nextToUpdate < correction)
2389 				cctx->nextToUpdate = 0;
2390 			else
2391 				cctx->nextToUpdate -= correction;
2392 		}
2393 
2394 		if ((U32)(ip + blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
2395 			/* enforce maxDist */
2396 			U32 const newLowLimit = (U32)(ip + blockSize - cctx->base) - maxDist;
2397 			if (cctx->lowLimit < newLowLimit)
2398 				cctx->lowLimit = newLowLimit;
2399 			if (cctx->dictLimit < cctx->lowLimit)
2400 				cctx->dictLimit = cctx->lowLimit;
2401 		}
2402 
2403 		cSize = ZSTD_compressBlock_internal(cctx, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, ip, blockSize);
2404 		if (ZSTD_isError(cSize))
2405 			return cSize;
2406 
2407 		if (cSize == 0) { /* block is not compressible */
2408 			U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw) << 1) + (U32)(blockSize << 3);
2409 			if (blockSize + ZSTD_blockHeaderSize > dstCapacity)
2410 				return ERROR(dstSize_tooSmall);
2411 			ZSTD_writeLE32(op, cBlockHeader24); /* 4-byte write is safe: the 4th byte is overwritten by the memcpy below */
2412 			memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
2413 			cSize = ZSTD_blockHeaderSize + blockSize;
2414 		} else {
2415 			U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed) << 1) + (U32)(cSize << 3);
2416 			ZSTD_writeLE24(op, cBlockHeader24);
2417 			cSize += ZSTD_blockHeaderSize;
2418 		}
2419 
2420 		remaining -= blockSize;
2421 		dstCapacity -= cSize;
2422 		ip += blockSize;
2423 		op += cSize;
2424 	}
2425 
2426 	if (lastFrameChunk && (op > ostart))
2427 		cctx->stage = ZSTDcs_ending;
2428 	return op - ostart;
2429 }
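/*
 * Editor's note: a worked example of the preemptive overflow correction
 * above, with assumed values windowLog = 27 and cycleMask = (1 << 16) - 1.
 * If curr = 0xC0001234:
 *     newCurr    = (curr & 0xFFFF) + (1 << 27) = 0x1234 + 0x08000000
 *     correction = curr - newCurr = 0xC0001234 - 0x08001234 = 0xB8000000
 * All table indexes and the base/dictBase pointers shift down by
 * `correction`, so every relative distance (and thus every stored offset)
 * is preserved while indexes drop far below the 2^32 wrap-around point.
 */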
2430 
2431 static size_t ZSTD_writeFrameHeader(void *dst, size_t dstCapacity, ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
2432 {
2433 	BYTE *const op = (BYTE *)dst;
2434 	U32 const dictIDSizeCode = (dictID > 0) + (dictID >= 256) + (dictID >= 65536); /* 0-3 */
2435 	U32 const checksumFlag = params.fParams.checksumFlag > 0;
2436 	U32 const windowSize = 1U << params.cParams.windowLog;
2437 	U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
2438 	BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
2439 	U32 const fcsCode =
2440 	    params.fParams.contentSizeFlag ? (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536 + 256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0; /* 0-3 */
2441 	BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6));
2442 	size_t pos;
2443 
2444 	if (dstCapacity < ZSTD_frameHeaderSize_max)
2445 		return ERROR(dstSize_tooSmall);
2446 
2447 	ZSTD_writeLE32(dst, ZSTD_MAGICNUMBER);
2448 	op[4] = frameHeaderDescriptionByte;
2449 	pos = 5;
2450 	if (!singleSegment)
2451 		op[pos++] = windowLogByte;
2452 	switch (dictIDSizeCode) {
2453 	default: /* impossible */
2454 	case 0: break;
2455 	case 1:
2456 		op[pos] = (BYTE)(dictID);
2457 		pos++;
2458 		break;
2459 	case 2:
2460 		ZSTD_writeLE16(op + pos, (U16)dictID);
2461 		pos += 2;
2462 		break;
2463 	case 3:
2464 		ZSTD_writeLE32(op + pos, dictID);
2465 		pos += 4;
2466 		break;
2467 	}
2468 	switch (fcsCode) {
2469 	default: /* impossible */
2470 	case 0:
2471 		if (singleSegment)
2472 			op[pos++] = (BYTE)(pledgedSrcSize);
2473 		break;
2474 	case 1:
2475 		ZSTD_writeLE16(op + pos, (U16)(pledgedSrcSize - 256));
2476 		pos += 2;
2477 		break;
2478 	case 2:
2479 		ZSTD_writeLE32(op + pos, (U32)(pledgedSrcSize));
2480 		pos += 4;
2481 		break;
2482 	case 3:
2483 		ZSTD_writeLE64(op + pos, (U64)(pledgedSrcSize));
2484 		pos += 8;
2485 		break;
2486 	}
2487 	return pos;
2488 }
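/*
 * Editor's note: example output of the function above for an assumed frame
 * with pledgedSrcSize = 1000, contentSizeFlag and checksumFlag set,
 * dictID = 0, windowLog = 20 (so windowSize >= pledgedSrcSize, hence
 * singleSegment = 1, and fcsCode = 1):
 *     bytes 0-3 : ZSTD_MAGICNUMBER, little-endian
 *     byte  4   : descriptor 0x64 = dictIDSizeCode(0) | checksum(1)<<2
 *                 | singleSegment(1)<<5 | fcsCode(1)<<6
 *     bytes 5-6 : LE16(1000 - 256) = LE16(744)
 * for a returned pos of 7; in single-segment mode the window byte is
 * omitted because the window is implied by the content size.
 */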
2489 
2490 static size_t ZSTD_compressContinue_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 frame, U32 lastFrameChunk)
2491 {
2492 	const BYTE *const ip = (const BYTE *)src;
2493 	size_t fhSize = 0;
2494 
2495 	if (cctx->stage == ZSTDcs_created)
2496 		return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */
2497 
2498 	if (frame && (cctx->stage == ZSTDcs_init)) {
2499 		fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID);
2500 		if (ZSTD_isError(fhSize))
2501 			return fhSize;
2502 		dstCapacity -= fhSize;
2503 		dst = (char *)dst + fhSize;
2504 		cctx->stage = ZSTDcs_ongoing;
2505 	}
2506 
2507 	/* Check if blocks follow each other */
2508 	if (src != cctx->nextSrc) {
2509 		/* not contiguous */
2510 		ptrdiff_t const delta = cctx->nextSrc - ip;
2511 		cctx->lowLimit = cctx->dictLimit;
2512 		cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base);
2513 		cctx->dictBase = cctx->base;
2514 		cctx->base -= delta;
2515 		cctx->nextToUpdate = cctx->dictLimit;
2516 		if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE)
2517 			cctx->lowLimit = cctx->dictLimit; /* too small extDict */
2518 	}
2519 
2520 	/* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
2521 	if ((ip + srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
2522 		ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
2523 		U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
2524 		cctx->lowLimit = lowLimitMax;
2525 	}
2526 
2527 	cctx->nextSrc = ip + srcSize;
2528 
2529 	if (srcSize) {
2530 		size_t const cSize = frame ? ZSTD_compress_generic(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk)
2531 					   : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize);
2532 		if (ZSTD_isError(cSize))
2533 			return cSize;
2534 		return cSize + fhSize;
2535 	} else
2536 		return fhSize;
2537 }
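/*
 * Editor's note on the non-contiguous branch above: when the new input does
 * not start where the previous call ended, the old prefix is demoted to an
 * "external dictionary". dictBase/dictLimit keep addressing the previous
 * segment while base is rebased by `delta`, so the new ip continues the same
 * strictly increasing index space; matches may then reference either
 * segment, which is exactly what the *_extDict block compressors handle.
 */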
2538 
2539 size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
2540 {
2541 	return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0);
2542 }
2543 
ZSTD_getBlockSizeMax(ZSTD_CCtx * cctx)2544 size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx) { return MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); }
2545 
ZSTD_compressBlock(ZSTD_CCtx * cctx,void * dst,size_t dstCapacity,const void * src,size_t srcSize)2546 size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
2547 {
2548 	size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx);
2549 	if (srcSize > blockSizeMax)
2550 		return ERROR(srcSize_wrong);
2551 	return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0);
2552 }
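
/*
 * Example (illustrative sketch, not compiled) : carving an input into raw
 * blocks with the block-level API. `op`, `oend`, `ip` and `remaining` are
 * hypothetical caller variables; error handling is abbreviated.
 *
 *	size_t const blockMax = ZSTD_getBlockSizeMax(cctx);
 *	while (remaining > 0) {
 *		size_t const chunk = MIN(remaining, blockMax);
 *		size_t const cSize = ZSTD_compressBlock(cctx, op, oend - op, ip, chunk);
 *		if (ZSTD_isError(cSize))
 *			return cSize;
 *		op += cSize;
 *		ip += chunk;
 *		remaining -= chunk;
 *	}
 *
 * ZSTD_compressBlock() emits neither frame header nor block headers, so the
 * caller must record each block's compressed size itself and use the
 * matching block-level decompression API.
 */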

/*! ZSTD_loadDictionaryContent() :
 *  @return : 0, or an error code
 */
static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx *zc, const void *src, size_t srcSize)
{
	const BYTE *const ip = (const BYTE *)src;
	const BYTE *const iend = ip + srcSize;

	/* input becomes curr prefix */
	zc->lowLimit = zc->dictLimit;
	zc->dictLimit = (U32)(zc->nextSrc - zc->base);
	zc->dictBase = zc->base;
	zc->base += ip - zc->nextSrc;
	zc->nextToUpdate = zc->dictLimit;
	zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base);

	zc->nextSrc = iend;
	if (srcSize <= HASH_READ_SIZE)
		return 0;

	switch (zc->params.cParams.strategy) {
	case ZSTD_fast: ZSTD_fillHashTable(zc, iend, zc->params.cParams.searchLength); break;

	case ZSTD_dfast: ZSTD_fillDoubleHashTable(zc, iend, zc->params.cParams.searchLength); break;

	case ZSTD_greedy:
	case ZSTD_lazy:
	case ZSTD_lazy2:
		if (srcSize >= HASH_READ_SIZE)
			ZSTD_insertAndFindFirstIndex(zc, iend - HASH_READ_SIZE, zc->params.cParams.searchLength);
		break;

	case ZSTD_btlazy2:
	case ZSTD_btopt:
	case ZSTD_btopt2:
		if (srcSize >= HASH_READ_SIZE)
			ZSTD_updateTree(zc, iend - HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength);
		break;

	default:
		return ERROR(GENERIC); /* strategy doesn't exist; impossible */
	}

	zc->nextToUpdate = (U32)(iend - zc->base);
	return 0;
}

/* Dictionaries that assign zero probability to symbols that do show up
   cause problems during FSE encoding.  Refuse dictionaries that assign
   zero probability to symbols that we may encounter during compression.
   NOTE: This behavior is not standard and could be improved in the future. */
static size_t ZSTD_checkDictNCount(short *normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
{
	U32 s;
	if (dictMaxSymbolValue < maxSymbolValue)
		return ERROR(dictionary_corrupted);
	for (s = 0; s <= maxSymbolValue; ++s) {
		if (normalizedCounter[s] == 0)
			return ERROR(dictionary_corrupted);
	}
	return 0;
}

/* Dictionary format :
 * See :
 * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
 */
/*! ZSTD_loadZstdDictionary() :
 * @return : 0, or an error code
 *  assumptions : the magic number has already been checked,
 *                and dictSize > 8
 */
static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
{
	const BYTE *dictPtr = (const BYTE *)dict;
	const BYTE *const dictEnd = dictPtr + dictSize;
	short offcodeNCount[MaxOff + 1];
	unsigned offcodeMaxValue = MaxOff;

	dictPtr += 4; /* skip magic number */
	cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : ZSTD_readLE32(dictPtr);
	dictPtr += 4;

	{
		size_t const hufHeaderSize = HUF_readCTable_wksp(cctx->hufTable, 255, dictPtr, dictEnd - dictPtr, cctx->tmpCounters, sizeof(cctx->tmpCounters));
		if (HUF_isError(hufHeaderSize))
			return ERROR(dictionary_corrupted);
		dictPtr += hufHeaderSize;
	}

	{
		unsigned offcodeLog;
		size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
		if (FSE_isError(offcodeHeaderSize))
			return ERROR(dictionary_corrupted);
		if (offcodeLog > OffFSELog)
			return ERROR(dictionary_corrupted);
		/* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
		CHECK_E(FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
			dictionary_corrupted);
		dictPtr += offcodeHeaderSize;
	}

	{
		short matchlengthNCount[MaxML + 1];
		unsigned matchlengthMaxValue = MaxML, matchlengthLog;
		size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
		if (FSE_isError(matchlengthHeaderSize))
			return ERROR(dictionary_corrupted);
		if (matchlengthLog > MLFSELog)
			return ERROR(dictionary_corrupted);
		/* Every match length code must have non-zero probability */
		CHECK_F(ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
		CHECK_E(
		    FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
		    dictionary_corrupted);
		dictPtr += matchlengthHeaderSize;
	}

	{
		short litlengthNCount[MaxLL + 1];
		unsigned litlengthMaxValue = MaxLL, litlengthLog;
		size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
		if (FSE_isError(litlengthHeaderSize))
			return ERROR(dictionary_corrupted);
		if (litlengthLog > LLFSELog)
			return ERROR(dictionary_corrupted);
		/* Every literal length code must have non-zero probability */
		CHECK_F(ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
		CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
			dictionary_corrupted);
		dictPtr += litlengthHeaderSize;
	}

	if (dictPtr + 12 > dictEnd)
		return ERROR(dictionary_corrupted);
	cctx->rep[0] = ZSTD_readLE32(dictPtr + 0);
	cctx->rep[1] = ZSTD_readLE32(dictPtr + 4);
	cctx->rep[2] = ZSTD_readLE32(dictPtr + 8);
	dictPtr += 12;

	{
		size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
		U32 offcodeMax = MaxOff;
		if (dictContentSize <= ((U32)-1) - 128 KB) {
			U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
			offcodeMax = ZSTD_highbit32(maxOffset);		     /* Calculate minimum offset code required to represent maxOffset */
		}
		/* All offset values <= dictContentSize + 128 KB must be representable */
		CHECK_F(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
		/* All repCodes must be <= dictContentSize and != 0 */
		{
			U32 u;
			for (u = 0; u < 3; u++) {
				if (cctx->rep[u] == 0)
					return ERROR(dictionary_corrupted);
				if (cctx->rep[u] > dictContentSize)
					return ERROR(dictionary_corrupted);
			}
		}

		cctx->flagStaticTables = 1;
		cctx->flagStaticHufTable = HUF_repeat_valid;
		return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize);
	}
}
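
/*
 * On-disk layout consumed above (informative summary; the dictionary-format
 * document linked before ZSTD_loadZstdDictionary() is normative) :
 *
 *	4 bytes  : ZSTD_DICT_MAGIC
 *	4 bytes  : dictID (little-endian)
 *	variable : Huffman literals table (HUF_readCTable_wksp)
 *	variable : FSE offset-code table header
 *	variable : FSE match-length table header
 *	variable : FSE literal-length table header
 *	12 bytes : three 4-byte little-endian repeat offsets
 *	rest     : raw content, loaded as the compression prefix
 */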

/** ZSTD_compress_insertDictionary() :
*   @return : 0, or an error code */
static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
{
	if ((dict == NULL) || (dictSize <= 8))
		return 0;

	/* dict as pure content */
	if ((ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict))
		return ZSTD_loadDictionaryContent(cctx, dict, dictSize);

	/* dict as zstd dictionary */
	return ZSTD_loadZstdDictionary(cctx, dict, dictSize);
}

/*! ZSTD_compressBegin_internal() :
*   @return : 0, or an error code */
static size_t ZSTD_compressBegin_internal(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, U64 pledgedSrcSize)
{
	ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue;
	CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp));
	return ZSTD_compress_insertDictionary(cctx, dict, dictSize);
}

/*! ZSTD_compressBegin_advanced() :
*   @return : 0, or an error code */
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
	/* compression parameters verification and optimization */
	CHECK_F(ZSTD_checkCParams(params.cParams));
	return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize);
}

size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel)
{
	ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
	return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0);
}

size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); }

/*! ZSTD_writeEpilogue() :
*   Ends a frame.
*   @return : nb of bytes written into dst (or an error code) */
static size_t ZSTD_writeEpilogue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity)
{
	BYTE *const ostart = (BYTE *)dst;
	BYTE *op = ostart;
	size_t fhSize = 0;

	if (cctx->stage == ZSTDcs_created)
		return ERROR(stage_wrong); /* init missing */

	/* special case : empty frame */
	if (cctx->stage == ZSTDcs_init) {
		fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0);
		if (ZSTD_isError(fhSize))
			return fhSize;
		dstCapacity -= fhSize;
		op += fhSize;
		cctx->stage = ZSTDcs_ongoing;
	}

	if (cctx->stage != ZSTDcs_ending) {
		/* write one last empty block, make it the "last" block */
		U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw) << 1) + 0;
		if (dstCapacity < 4)
			return ERROR(dstSize_tooSmall);
		ZSTD_writeLE32(op, cBlockHeader24);
		op += ZSTD_blockHeaderSize;
		dstCapacity -= ZSTD_blockHeaderSize;
	}

	if (cctx->params.fParams.checksumFlag) {
		U32 const checksum = (U32)xxh64_digest(&cctx->xxhState);
		if (dstCapacity < 4)
			return ERROR(dstSize_tooSmall);
		ZSTD_writeLE32(op, checksum);
		op += 4;
	}

	cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
	return op - ostart;
}

size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	size_t endResult;
	size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1);
	if (ZSTD_isError(cSize))
		return cSize;
	endResult = ZSTD_writeEpilogue(cctx, (char *)dst + cSize, dstCapacity - cSize);
	if (ZSTD_isError(endResult))
		return endResult;
	return cSize + endResult;
}
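
/*
 * Example (illustrative sketch, not compiled) : the low-level frame
 * protocol. One frame is one ZSTD_compressBegin*() call, zero or more
 * ZSTD_compressContinue() calls over consecutive input chunks, and a final
 * ZSTD_compressEnd(), which appends the epilogue (last block, optional
 * checksum). `op`, `oend`, `chunk` and `level` are hypothetical.
 *
 *	CHECK_F(ZSTD_compressBegin(cctx, level));
 *	while (another chunk remains, except the last) {
 *		size_t const cSize = ZSTD_compressContinue(cctx, op, oend - op, chunk, chunkSize);
 *		if (ZSTD_isError(cSize))
 *			return cSize;
 *		op += cSize;
 *	}
 *	return ZSTD_compressEnd(cctx, op, oend - op, lastChunk, lastChunkSize);
 */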

static size_t ZSTD_compress_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
				     ZSTD_parameters params)
{
	CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize));
	return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}

size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
			       ZSTD_parameters params)
{
	return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
}

size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, ZSTD_parameters params)
{
	return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
}
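
/*
 * Example (illustrative sketch, not compiled) : one-shot compression into a
 * worst-case-sized buffer, so a dstSize_tooSmall error cannot occur.
 * `level`, `src`, `srcSize` and `dst` are hypothetical caller values.
 *
 *	ZSTD_parameters const params = ZSTD_getParams(level, srcSize, 0);
 *	size_t const dstCapacity = ZSTD_compressBound(srcSize);
 *	size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, params);
 *	if (ZSTD_isError(cSize))
 *		return cSize;
 */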

/* =====  Dictionary API  ===== */

struct ZSTD_CDict_s {
	void *dictBuffer;
	const void *dictContent;
	size_t dictContentSize;
	ZSTD_CCtx *refContext;
}; /* typedef'd to ZSTD_CDict within "zstd.h" */

size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams) { return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CDict)); }

static ZSTD_CDict *ZSTD_createCDict_advanced(const void *dictBuffer, size_t dictSize, unsigned byReference, ZSTD_parameters params, ZSTD_customMem customMem)
{
	if (!customMem.customAlloc || !customMem.customFree)
		return NULL;

	{
		ZSTD_CDict *const cdict = (ZSTD_CDict *)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
		ZSTD_CCtx *const cctx = ZSTD_createCCtx_advanced(customMem);

		if (!cdict || !cctx) {
			ZSTD_free(cdict, customMem);
			ZSTD_freeCCtx(cctx);
			return NULL;
		}

		if ((byReference) || (!dictBuffer) || (!dictSize)) {
			cdict->dictBuffer = NULL;
			cdict->dictContent = dictBuffer;
		} else {
			void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
			if (!internalBuffer) {
				ZSTD_freeCCtx(cctx); /* free the context and its workspace, like the other error paths */
				ZSTD_free(cdict, customMem);
				return NULL;
			}
			memcpy(internalBuffer, dictBuffer, dictSize);
			cdict->dictBuffer = internalBuffer;
			cdict->dictContent = internalBuffer;
		}

		{
			size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
			if (ZSTD_isError(errorCode)) {
				ZSTD_free(cdict->dictBuffer, customMem);
				ZSTD_free(cdict, customMem);
				ZSTD_freeCCtx(cctx);
				return NULL;
			}
		}

		cdict->refContext = cctx;
		cdict->dictContentSize = dictSize;
		return cdict;
	}
}

ZSTD_CDict *ZSTD_initCDict(const void *dict, size_t dictSize, ZSTD_parameters params, void *workspace, size_t workspaceSize)
{
	ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
	return ZSTD_createCDict_advanced(dict, dictSize, 1, params, stackMem);
}

size_t ZSTD_freeCDict(ZSTD_CDict *cdict)
{
	if (cdict == NULL)
		return 0; /* support free on NULL */
	{
		ZSTD_customMem const cMem = cdict->refContext->customMem;
		ZSTD_freeCCtx(cdict->refContext);
		ZSTD_free(cdict->dictBuffer, cMem);
		ZSTD_free(cdict, cMem);
		return 0;
	}
}

static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict *cdict) { return ZSTD_getParamsFromCCtx(cdict->refContext); }

size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize)
{
	if (cdict->dictContentSize)
		CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
	else {
		ZSTD_parameters params = cdict->refContext->params;
		params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
		CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize));
	}
	return 0;
}

/*! ZSTD_compress_usingCDict() :
*   Compression using a digested Dictionary.
*   Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
*   Note that compression level is decided during dictionary creation */
size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict)
{
	CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize));

	if (cdict->refContext->params.fParams.contentSizeFlag == 1) {
		cctx->params.fParams.contentSizeFlag = 1;
		cctx->frameContentSize = srcSize;
	} else {
		cctx->params.fParams.contentSizeFlag = 0;
	}

	return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
}
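
/*
 * Example (illustrative sketch, not compiled) : repeated compression with a
 * digested dictionary over a caller-allocated workspace (e.g. from
 * kvmalloc() in the kernel). All names are hypothetical; error handling is
 * abbreviated.
 *
 *	ZSTD_parameters const params = ZSTD_getParams(level, 0, dictSize);
 *	size_t const wkspSize = ZSTD_CDictWorkspaceBound(params.cParams);
 *	void *const wksp = kvmalloc(wkspSize, GFP_KERNEL);
 *	ZSTD_CDict *const cdict = ZSTD_initCDict(dict, dictSize, params, wksp, wkspSize);
 *	if (cdict)
 *		cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
 *
 * Note that ZSTD_initCDict() references `dict` without copying it
 * (byReference == 1), so the dictionary buffer must outlive the ZSTD_CDict.
 */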

/* ******************************************************************
*  Streaming
********************************************************************/

typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage;

struct ZSTD_CStream_s {
	ZSTD_CCtx *cctx;
	ZSTD_CDict *cdictLocal;
	const ZSTD_CDict *cdict;
	char *inBuff;
	size_t inBuffSize;
	size_t inToCompress;
	size_t inBuffPos;
	size_t inBuffTarget;
	size_t blockSize;
	char *outBuff;
	size_t outBuffSize;
	size_t outBuffContentSize;
	size_t outBuffFlushedSize;
	ZSTD_cStreamStage stage;
	U32 checksum;
	U32 frameEnded;
	U64 pledgedSrcSize;
	U64 inputProcessed;
	ZSTD_parameters params;
	ZSTD_customMem customMem;
}; /* typedef'd to ZSTD_CStream within "zstd.h" */

size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams)
{
	size_t const inBuffSize = (size_t)1 << cParams.windowLog;
	size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, inBuffSize);
	size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;

	return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
}

ZSTD_CStream *ZSTD_createCStream_advanced(ZSTD_customMem customMem)
{
	ZSTD_CStream *zcs;

	if (!customMem.customAlloc || !customMem.customFree)
		return NULL;

	zcs = (ZSTD_CStream *)ZSTD_malloc(sizeof(ZSTD_CStream), customMem);
	if (zcs == NULL)
		return NULL;
	memset(zcs, 0, sizeof(ZSTD_CStream));
	memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem));
	zcs->cctx = ZSTD_createCCtx_advanced(customMem);
	if (zcs->cctx == NULL) {
		ZSTD_freeCStream(zcs);
		return NULL;
	}
	return zcs;
}

size_t ZSTD_freeCStream(ZSTD_CStream *zcs)
{
	if (zcs == NULL)
		return 0; /* support free on NULL */
	{
		ZSTD_customMem const cMem = zcs->customMem;
		ZSTD_freeCCtx(zcs->cctx);
		zcs->cctx = NULL;
		ZSTD_freeCDict(zcs->cdictLocal);
		zcs->cdictLocal = NULL;
		ZSTD_free(zcs->inBuff, cMem);
		zcs->inBuff = NULL;
		ZSTD_free(zcs->outBuff, cMem);
		zcs->outBuff = NULL;
		ZSTD_free(zcs, cMem);
		return 0;
	}
}

/*======   Initialization   ======*/

size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */; }

static size_t ZSTD_resetCStream_internal(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
{
	if (zcs->inBuffSize == 0)
		return ERROR(stage_wrong); /* zcs has never been initialized => cannot reset */

	if (zcs->cdict)
		CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize))
	else
		CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize));

	zcs->inToCompress = 0;
	zcs->inBuffPos = 0;
	zcs->inBuffTarget = zcs->blockSize;
	zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
	zcs->stage = zcss_load;
	zcs->frameEnded = 0;
	zcs->pledgedSrcSize = pledgedSrcSize;
	zcs->inputProcessed = 0;
	return 0; /* ready to go */
}

size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
{
	zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0);

	return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
}

static size_t ZSTD_initCStream_advanced(ZSTD_CStream *zcs, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
	/* allocate buffers */
	{
		size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
		if (zcs->inBuffSize < neededInBuffSize) {
			zcs->inBuffSize = neededInBuffSize;
			ZSTD_free(zcs->inBuff, zcs->customMem);
			zcs->inBuff = (char *)ZSTD_malloc(neededInBuffSize, zcs->customMem);
			if (zcs->inBuff == NULL)
				return ERROR(memory_allocation);
		}
		zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize);
	}
	if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize) + 1) {
		zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize) + 1;
		ZSTD_free(zcs->outBuff, zcs->customMem);
		zcs->outBuff = (char *)ZSTD_malloc(zcs->outBuffSize, zcs->customMem);
		if (zcs->outBuff == NULL)
			return ERROR(memory_allocation);
	}

	if (dict && dictSize >= 8) {
		ZSTD_freeCDict(zcs->cdictLocal);
		zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem);
		if (zcs->cdictLocal == NULL)
			return ERROR(memory_allocation);
		zcs->cdict = zcs->cdictLocal;
	} else
		zcs->cdict = NULL;

	zcs->checksum = params.fParams.checksumFlag > 0;
	zcs->params = params;

	return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
}

ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
{
	ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
	ZSTD_CStream *const zcs = ZSTD_createCStream_advanced(stackMem);
	if (zcs) {
		size_t const code = ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);
		if (ZSTD_isError(code)) {
			return NULL;
		}
	}
	return zcs;
}

ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
{
	ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict);
	ZSTD_CStream *const zcs = ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize);
	if (zcs) {
		zcs->cdict = cdict;
		if (ZSTD_isError(ZSTD_resetCStream_internal(zcs, pledgedSrcSize))) {
			return NULL;
		}
	}
	return zcs;
}
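
/*
 * Example (illustrative sketch, not compiled) : creating a stream over a
 * caller-provided workspace. ZSTD_CStreamWorkspaceBound() bounds everything
 * the stream needs (context, window-sized input buffer, output buffer).
 * `level` and `pledgedSrcSize` are hypothetical caller values.
 *
 *	ZSTD_parameters const params = ZSTD_getParams(level, pledgedSrcSize, 0);
 *	size_t const wkspSize = ZSTD_CStreamWorkspaceBound(params.cParams);
 *	void *const wksp = kvmalloc(wkspSize, GFP_KERNEL);
 *	ZSTD_CStream *const zcs = ZSTD_initCStream(params, pledgedSrcSize, wksp, wkspSize);
 *	if (zcs == NULL)
 *		return -ENOMEM;
 */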

/*======   Compression   ======*/

typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e;

ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	size_t const length = MIN(dstCapacity, srcSize);
	memcpy(dst, src, length);
	return length;
}

static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *dstCapacityPtr, const void *src, size_t *srcSizePtr, ZSTD_flush_e const flush)
{
	U32 someMoreWork = 1;
	const char *const istart = (const char *)src;
	const char *const iend = istart + *srcSizePtr;
	const char *ip = istart;
	char *const ostart = (char *)dst;
	char *const oend = ostart + *dstCapacityPtr;
	char *op = ostart;

	while (someMoreWork) {
		switch (zcs->stage) {
		case zcss_init:
			return ERROR(init_missing); /* call ZSTD_initCStream() first! */

		case zcss_load:
			/* complete inBuffer */
			{
				size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
				size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend - ip);
				zcs->inBuffPos += loaded;
				ip += loaded;
				if ((zcs->inBuffPos == zcs->inToCompress) || (!flush && (toLoad != loaded))) {
					someMoreWork = 0;
					break; /* not enough input to get a full block : stop there, wait for more */
				}
			}
			/* compress curr block (note : this stage cannot be stopped in the middle) */
			{
				void *cDst;
				size_t cSize;
				size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
				size_t oSize = oend - op;
				if (oSize >= ZSTD_compressBound(iSize))
					cDst = op; /* compress directly into output buffer (avoid flush stage) */
				else
					cDst = zcs->outBuff, oSize = zcs->outBuffSize;
				cSize = (flush == zsf_end) ? ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize)
							   : ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize);
				if (ZSTD_isError(cSize))
					return cSize;
				if (flush == zsf_end)
					zcs->frameEnded = 1;
				/* prepare next block */
				zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
				if (zcs->inBuffTarget > zcs->inBuffSize)
					zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */
				zcs->inToCompress = zcs->inBuffPos;
				if (cDst == op) {
					op += cSize;
					break;
				} /* no need to flush */
				zcs->outBuffContentSize = cSize;
				zcs->outBuffFlushedSize = 0;
				zcs->stage = zcss_flush; /* pass-through to flush stage */
			}
			/* fall through */

		case zcss_flush: {
			size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
			size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
			op += flushed;
			zcs->outBuffFlushedSize += flushed;
			if (toFlush != flushed) {
				someMoreWork = 0;
				break;
			} /* dst too small to store flushed data : stop there */
			zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
			zcs->stage = zcss_load;
			break;
		}

		case zcss_final:
			someMoreWork = 0; /* do nothing */
			break;

		default:
			return ERROR(GENERIC); /* impossible */
		}
	}

	*srcSizePtr = ip - istart;
	*dstCapacityPtr = op - ostart;
	zcs->inputProcessed += *srcSizePtr;
	if (zcs->frameEnded)
		return 0;
	{
		size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
		if (hintInSize == 0)
			hintInSize = zcs->blockSize;
		return hintInSize;
	}
}

size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
{
	size_t sizeRead = input->size - input->pos;
	size_t sizeWritten = output->size - output->pos;
	size_t const result =
	    ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, (const char *)(input->src) + input->pos, &sizeRead, zsf_gather);
	input->pos += sizeRead;
	output->pos += sizeWritten;
	return result;
}
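
/*
 * Example (illustrative sketch, not compiled) : the streaming input loop.
 * Each call consumes from `input` and produces into `output`, advancing
 * both `pos` fields; the return value is a hint for the next input size
 * (or an error). `srcBuf`, `dstBuf` and their sizes are hypothetical.
 *
 *	ZSTD_inBuffer input = { srcBuf, srcBufSize, 0 };
 *	ZSTD_outBuffer output = { dstBuf, dstBufSize, 0 };
 *	while (input.pos < input.size) {
 *		size_t const hint = ZSTD_compressStream(zcs, &output, &input);
 *		if (ZSTD_isError(hint))
 *			return hint;
 *		(write out output.dst[0 .. output.pos) and reset output.pos)
 *	}
 */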

/*======   Finalize   ======*/

/*! ZSTD_flushStream() :
*   @return : amount of data remaining to flush */
size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
{
	size_t srcSize = 0;
	size_t sizeWritten = output->size - output->pos;
	size_t const result = ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, &srcSize,
							  &srcSize, /* use a valid src address instead of NULL */
							  zsf_flush);
	output->pos += sizeWritten;
	if (ZSTD_isError(result))
		return result;
	return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */
}

size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
{
	BYTE *const ostart = (BYTE *)(output->dst) + output->pos;
	BYTE *const oend = (BYTE *)(output->dst) + output->size;
	BYTE *op = ostart;

	if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize))
		return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */

	if (zcs->stage != zcss_final) {
		/* flush whatever remains */
		size_t srcSize = 0;
		size_t sizeWritten = output->size - output->pos;
		size_t const notEnded =
		    ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */
		size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
		op += sizeWritten;
		if (remainingToFlush) {
			output->pos += sizeWritten;
			return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4);
		}
		/* create epilogue */
		zcs->stage = zcss_final;
		zcs->outBuffContentSize = !notEnded ? 0 : ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL,
									   0); /* write epilogue, including final empty block, into outBuff */
	}

	/* flush epilogue */
	{
		size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
		size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
		op += flushed;
		zcs->outBuffFlushedSize += flushed;
		output->pos += op - ostart;
		if (toFlush == flushed)
			zcs->stage = zcss_init; /* end reached */
		return toFlush - flushed;
	}
}
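
/*
 * Example (illustrative sketch, not compiled) : finishing a frame.
 * ZSTD_endStream() returns the number of bytes still waiting to be
 * flushed, so the caller loops until it returns 0, draining `output`
 * between calls.
 *
 *	size_t remaining;
 *	do {
 *		output.pos = 0;
 *		remaining = ZSTD_endStream(zcs, &output);
 *		if (ZSTD_isError(remaining))
 *			return remaining;
 *		(write out output.dst[0 .. output.pos))
 *	} while (remaining != 0);
 */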

/*-=====  Pre-defined compression levels  =====-*/

#define ZSTD_DEFAULT_CLEVEL 1
#define ZSTD_MAX_CLEVEL 22
int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }

static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL + 1] = {
    {
	/* "default" */
	/* W,  C,  H,  S,  L, TL, strat */
	{18, 12, 12, 1, 7, 16, ZSTD_fast},    /* level  0 - never used */
	{19, 13, 14, 1, 7, 16, ZSTD_fast},    /* level  1 */
	{19, 15, 16, 1, 6, 16, ZSTD_fast},    /* level  2 */
	{20, 16, 17, 1, 5, 16, ZSTD_dfast},   /* level  3.*/
	{20, 18, 18, 1, 5, 16, ZSTD_dfast},   /* level  4.*/
	{20, 15, 18, 3, 5, 16, ZSTD_greedy},  /* level  5 */
	{21, 16, 19, 2, 5, 16, ZSTD_lazy},    /* level  6 */
	{21, 17, 20, 3, 5, 16, ZSTD_lazy},    /* level  7 */
	{21, 18, 20, 3, 5, 16, ZSTD_lazy2},   /* level  8 */
	{21, 20, 20, 3, 5, 16, ZSTD_lazy2},   /* level  9 */
	{21, 19, 21, 4, 5, 16, ZSTD_lazy2},   /* level 10 */
	{22, 20, 22, 4, 5, 16, ZSTD_lazy2},   /* level 11 */
	{22, 20, 22, 5, 5, 16, ZSTD_lazy2},   /* level 12 */
	{22, 21, 22, 5, 5, 16, ZSTD_lazy2},   /* level 13 */
	{22, 21, 22, 6, 5, 16, ZSTD_lazy2},   /* level 14 */
	{22, 21, 21, 5, 5, 16, ZSTD_btlazy2}, /* level 15 */
	{23, 22, 22, 5, 5, 16, ZSTD_btlazy2}, /* level 16 */
	{23, 21, 22, 4, 5, 24, ZSTD_btopt},   /* level 17 */
	{23, 23, 22, 6, 5, 32, ZSTD_btopt},   /* level 18 */
	{23, 23, 22, 6, 3, 48, ZSTD_btopt},   /* level 19 */
	{25, 25, 23, 7, 3, 64, ZSTD_btopt2},  /* level 20 */
	{26, 26, 23, 7, 3, 256, ZSTD_btopt2}, /* level 21 */
	{27, 27, 25, 9, 3, 512, ZSTD_btopt2}, /* level 22 */
    },
    {
	/* for srcSize <= 256 KB */
	/* W,  C,  H,  S,  L,  T, strat */
	{0, 0, 0, 0, 0, 0, ZSTD_fast},	 /* level  0 - not used */
	{18, 13, 14, 1, 6, 8, ZSTD_fast},      /* level  1 */
	{18, 14, 13, 1, 5, 8, ZSTD_dfast},     /* level  2 */
	{18, 16, 15, 1, 5, 8, ZSTD_dfast},     /* level  3 */
	{18, 15, 17, 1, 5, 8, ZSTD_greedy},    /* level  4.*/
	{18, 16, 17, 4, 5, 8, ZSTD_greedy},    /* level  5.*/
	{18, 16, 17, 3, 5, 8, ZSTD_lazy},      /* level  6.*/
	{18, 17, 17, 4, 4, 8, ZSTD_lazy},      /* level  7 */
	{18, 17, 17, 4, 4, 8, ZSTD_lazy2},     /* level  8 */
	{18, 17, 17, 5, 4, 8, ZSTD_lazy2},     /* level  9 */
	{18, 17, 17, 6, 4, 8, ZSTD_lazy2},     /* level 10 */
	{18, 18, 17, 6, 4, 8, ZSTD_lazy2},     /* level 11.*/
	{18, 18, 17, 7, 4, 8, ZSTD_lazy2},     /* level 12.*/
	{18, 19, 17, 6, 4, 8, ZSTD_btlazy2},   /* level 13 */
	{18, 18, 18, 4, 4, 16, ZSTD_btopt},    /* level 14.*/
	{18, 18, 18, 4, 3, 16, ZSTD_btopt},    /* level 15.*/
	{18, 19, 18, 6, 3, 32, ZSTD_btopt},    /* level 16.*/
	{18, 19, 18, 8, 3, 64, ZSTD_btopt},    /* level 17.*/
	{18, 19, 18, 9, 3, 128, ZSTD_btopt},   /* level 18.*/
	{18, 19, 18, 10, 3, 256, ZSTD_btopt},  /* level 19.*/
	{18, 19, 18, 11, 3, 512, ZSTD_btopt2}, /* level 20.*/
	{18, 19, 18, 12, 3, 512, ZSTD_btopt2}, /* level 21.*/
	{18, 19, 18, 13, 3, 512, ZSTD_btopt2}, /* level 22.*/
    },
    {
	/* for srcSize <= 128 KB */
	/* W,  C,  H,  S,  L,  T, strat */
	{17, 12, 12, 1, 7, 8, ZSTD_fast},      /* level  0 - not used */
	{17, 12, 13, 1, 6, 8, ZSTD_fast},      /* level  1 */
	{17, 13, 16, 1, 5, 8, ZSTD_fast},      /* level  2 */
	{17, 16, 16, 2, 5, 8, ZSTD_dfast},     /* level  3 */
	{17, 13, 15, 3, 4, 8, ZSTD_greedy},    /* level  4 */
	{17, 15, 17, 4, 4, 8, ZSTD_greedy},    /* level  5 */
	{17, 16, 17, 3, 4, 8, ZSTD_lazy},      /* level  6 */
	{17, 15, 17, 4, 4, 8, ZSTD_lazy2},     /* level  7 */
	{17, 17, 17, 4, 4, 8, ZSTD_lazy2},     /* level  8 */
	{17, 17, 17, 5, 4, 8, ZSTD_lazy2},     /* level  9 */
	{17, 17, 17, 6, 4, 8, ZSTD_lazy2},     /* level 10 */
	{17, 17, 17, 7, 4, 8, ZSTD_lazy2},     /* level 11 */
	{17, 17, 17, 8, 4, 8, ZSTD_lazy2},     /* level 12 */
	{17, 18, 17, 6, 4, 8, ZSTD_btlazy2},   /* level 13.*/
	{17, 17, 17, 7, 3, 8, ZSTD_btopt},     /* level 14.*/
	{17, 17, 17, 7, 3, 16, ZSTD_btopt},    /* level 15.*/
	{17, 18, 17, 7, 3, 32, ZSTD_btopt},    /* level 16.*/
	{17, 18, 17, 7, 3, 64, ZSTD_btopt},    /* level 17.*/
	{17, 18, 17, 7, 3, 256, ZSTD_btopt},   /* level 18.*/
	{17, 18, 17, 8, 3, 256, ZSTD_btopt},   /* level 19.*/
	{17, 18, 17, 9, 3, 256, ZSTD_btopt2},  /* level 20.*/
	{17, 18, 17, 10, 3, 256, ZSTD_btopt2}, /* level 21.*/
	{17, 18, 17, 11, 3, 512, ZSTD_btopt2}, /* level 22.*/
    },
    {
	/* for srcSize <= 16 KB */
	/* W,  C,  H,  S,  L,  T, strat */
	{14, 12, 12, 1, 7, 6, ZSTD_fast},      /* level  0 - not used */
	{14, 14, 14, 1, 6, 6, ZSTD_fast},      /* level  1 */
	{14, 14, 14, 1, 4, 6, ZSTD_fast},      /* level  2 */
	{14, 14, 14, 1, 4, 6, ZSTD_dfast},     /* level  3.*/
	{14, 14, 14, 4, 4, 6, ZSTD_greedy},    /* level  4.*/
	{14, 14, 14, 3, 4, 6, ZSTD_lazy},      /* level  5.*/
	{14, 14, 14, 4, 4, 6, ZSTD_lazy2},     /* level  6 */
	{14, 14, 14, 5, 4, 6, ZSTD_lazy2},     /* level  7 */
	{14, 14, 14, 6, 4, 6, ZSTD_lazy2},     /* level  8.*/
	{14, 15, 14, 6, 4, 6, ZSTD_btlazy2},   /* level  9.*/
	{14, 15, 14, 3, 3, 6, ZSTD_btopt},     /* level 10.*/
	{14, 15, 14, 6, 3, 8, ZSTD_btopt},     /* level 11.*/
	{14, 15, 14, 6, 3, 16, ZSTD_btopt},    /* level 12.*/
	{14, 15, 14, 6, 3, 24, ZSTD_btopt},    /* level 13.*/
	{14, 15, 15, 6, 3, 48, ZSTD_btopt},    /* level 14.*/
	{14, 15, 15, 6, 3, 64, ZSTD_btopt},    /* level 15.*/
	{14, 15, 15, 6, 3, 96, ZSTD_btopt},    /* level 16.*/
	{14, 15, 15, 6, 3, 128, ZSTD_btopt},   /* level 17.*/
	{14, 15, 15, 6, 3, 256, ZSTD_btopt},   /* level 18.*/
	{14, 15, 15, 7, 3, 256, ZSTD_btopt},   /* level 19.*/
	{14, 15, 15, 8, 3, 256, ZSTD_btopt2},  /* level 20.*/
	{14, 15, 15, 9, 3, 256, ZSTD_btopt2},  /* level 21.*/
	{14, 15, 15, 10, 3, 256, ZSTD_btopt2}, /* level 22.*/
    },
};

/*! ZSTD_getCParams() :
*   @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`.
*   Size values are optional, provide 0 if not known or unused */
ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
{
	ZSTD_compressionParameters cp;
	size_t const addedSize = srcSize ? 0 : 500;
	U64 const rSize = srcSize + dictSize ? srcSize + dictSize + addedSize : (U64)-1;
	U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */
	if (compressionLevel <= 0)
		compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */
	if (compressionLevel > ZSTD_MAX_CLEVEL)
		compressionLevel = ZSTD_MAX_CLEVEL;
	cp = ZSTD_defaultCParameters[tableID][compressionLevel];
	if (ZSTD_32bits()) { /* auto-correction, for 32-bits mode */
		if (cp.windowLog > ZSTD_WINDOWLOG_MAX)
			cp.windowLog = ZSTD_WINDOWLOG_MAX;
		if (cp.chainLog > ZSTD_CHAINLOG_MAX)
			cp.chainLog = ZSTD_CHAINLOG_MAX;
		if (cp.hashLog > ZSTD_HASHLOG_MAX)
			cp.hashLog = ZSTD_HASHLOG_MAX;
	}
	cp = ZSTD_adjustCParams(cp, srcSize, dictSize);
	return cp;
}
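
/*
 * Example (illustrative walk-through of the table selection above) : for
 * srcSize == 100 KB and no dictionary, rSize == 100 KB, so
 * tableID == 1 + 1 + 0 == 2 and parameters come from the
 * "srcSize <= 128 KB" table. For srcSize == 0 and dictSize == 0,
 * rSize == (U64)-1, tableID == 0, and the "default" table is used.
 */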

/*! ZSTD_getParams() :
*   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
*   All fields of `ZSTD_frameParameters` are set to default (0) */
ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
{
	ZSTD_parameters params;
	ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize);
	memset(&params, 0, sizeof(params));
	params.cParams = cParams;
	return params;
}

EXPORT_SYMBOL(ZSTD_maxCLevel);
EXPORT_SYMBOL(ZSTD_compressBound);

EXPORT_SYMBOL(ZSTD_CCtxWorkspaceBound);
EXPORT_SYMBOL(ZSTD_initCCtx);
EXPORT_SYMBOL(ZSTD_compressCCtx);
EXPORT_SYMBOL(ZSTD_compress_usingDict);

EXPORT_SYMBOL(ZSTD_CDictWorkspaceBound);
EXPORT_SYMBOL(ZSTD_initCDict);
EXPORT_SYMBOL(ZSTD_compress_usingCDict);

EXPORT_SYMBOL(ZSTD_CStreamWorkspaceBound);
EXPORT_SYMBOL(ZSTD_initCStream);
EXPORT_SYMBOL(ZSTD_initCStream_usingCDict);
EXPORT_SYMBOL(ZSTD_resetCStream);
EXPORT_SYMBOL(ZSTD_compressStream);
EXPORT_SYMBOL(ZSTD_flushStream);
EXPORT_SYMBOL(ZSTD_endStream);
EXPORT_SYMBOL(ZSTD_CStreamInSize);
EXPORT_SYMBOL(ZSTD_CStreamOutSize);

EXPORT_SYMBOL(ZSTD_getCParams);
EXPORT_SYMBOL(ZSTD_getParams);
EXPORT_SYMBOL(ZSTD_checkCParams);
EXPORT_SYMBOL(ZSTD_adjustCParams);

EXPORT_SYMBOL(ZSTD_compressBegin);
EXPORT_SYMBOL(ZSTD_compressBegin_usingDict);
EXPORT_SYMBOL(ZSTD_compressBegin_advanced);
EXPORT_SYMBOL(ZSTD_copyCCtx);
EXPORT_SYMBOL(ZSTD_compressBegin_usingCDict);
EXPORT_SYMBOL(ZSTD_compressContinue);
EXPORT_SYMBOL(ZSTD_compressEnd);

EXPORT_SYMBOL(ZSTD_getBlockSizeMax);
EXPORT_SYMBOL(ZSTD_compressBlock);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Zstd Compressor");