/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/*-**************************************
*  Tuning parameters
****************************************/
#define MINRATIO 4   /* minimum nb of occurrences required to be selected into the dictionary */
#define ZDICT_MAX_SAMPLES_SIZE (2000U << 20)
#define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO)


/*-**************************************
*  Compiler Options
****************************************/
/* Unix Large Files support (>4GB) */
#define _FILE_OFFSET_BITS 64
#if (defined(__sun__) && (!defined(__LP64__)))   /* Sun Solaris 32-bits requires specific definitions */
#  define _LARGEFILE_SOURCE
#elif ! defined(__LP64__)                        /* No point defining Large file for 64 bit */
#  define _LARGEFILE64_SOURCE
#endif


/*-*************************************
*  Dependencies
***************************************/
#include <stdlib.h>        /* malloc, free */
#include <string.h>        /* memset */
#include <stdio.h>         /* fprintf, fopen, ftello64 */
#include <time.h>          /* clock */

#include "../common/mem.h"           /* read */
#include "../common/fse.h"           /* FSE_normalizeCount, FSE_writeNCount */
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"           /* HUF_buildCTable, HUF_writeCTable */
#include "../common/zstd_internal.h" /* includes zstd.h */
#include "../common/xxhash.h"        /* XXH64 */
#include "divsufsort.h"
#ifndef ZDICT_STATIC_LINKING_ONLY
#  define ZDICT_STATIC_LINKING_ONLY
#endif
#include "zdict.h"
#include "../compress/zstd_compress_internal.h" /* ZSTD_loadCEntropy() */


/*-*************************************
*  Constants
***************************************/
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define DICTLISTSIZE_DEFAULT 10000

#define NOISELENGTH 32

static const U32 g_selectivity_default = 9;


/*-*************************************
*  Console display
***************************************/
#undef  DISPLAY
#define DISPLAY(...)         { fprintf(stderr, __VA_ARGS__); fflush( stderr ); }
#undef  DISPLAYLEVEL
#define DISPLAYLEVEL(l, ...) if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); }    /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */

static clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }

static void ZDICT_printHex(const void* ptr, size_t length)
{
    const BYTE* const b = (const BYTE*)ptr;
    size_t u;
    for (u=0; u<length; u++) {
        BYTE c = b[u];
        if (c<32 || c>126) c = '.';   /* non-printable char */
        DISPLAY("%c", c);
    }
}


/*-********************************************************
*  Helper functions
**********************************************************/
unsigned ZDICT_isError(size_t errorCode) { return ERR_isError(errorCode); }

const char* ZDICT_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }

unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize)
{
    if (dictSize < 8) return 0;
    if (MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return 0;
    return MEM_readLE32((const char*)dictBuffer + 4);
}

size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize)
{
    size_t headerSize;
    if (dictSize <= 8 || MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return ERROR(dictionary_corrupted);

    {   ZSTD_compressedBlockState_t* bs = (ZSTD_compressedBlockState_t*)malloc(sizeof(ZSTD_compressedBlockState_t));
        U32* wksp = (U32*)malloc(HUF_WORKSPACE_SIZE);
        if (!bs || !wksp) {
            headerSize = ERROR(memory_allocation);
        } else {
            ZSTD_reset_compressedBlockState(bs);
            headerSize = ZSTD_loadCEntropy(bs, wksp, dictBuffer, dictSize);
        }

        free(bs);
        free(wksp);
    }

    return headerSize;
}

/*-********************************************************
*  Dictionary training functions
**********************************************************/
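/* ZDICT_NbCommonBytes() :
 * Given a non-zero xor-diff between two machine words, returns the number of bytes
 * (in memory order) that are identical before the first differing byte, i.e. the byte
 * offset of the first difference, handling both little- and big-endian targets.
 * Used by ZDICT_count() below to finish a word-at-a-time comparison. */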
static unsigned ZDICT_NbCommonBytes (size_t val)
{
    if (MEM_isLittleEndian()) {
        if (MEM_64bits()) {
#       if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            _BitScanForward64( &r, (U64)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_ctzll((U64)val) >> 3);
#       else
            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#       endif
        } else { /* 32 bits */
#       if defined(_MSC_VER)
            unsigned long r=0;
            _BitScanForward( &r, (U32)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_ctz((U32)val) >> 3);
#       else
            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#       endif
        }
    } else {  /* Big Endian CPU */
        if (MEM_64bits()) {
#       if defined(_MSC_VER) && defined(_WIN64)
            unsigned long r = 0;
            _BitScanReverse64( &r, val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_clzll(val) >> 3);
#       else
            unsigned r;
            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
#       endif
        } else { /* 32 bits */
#       if defined(_MSC_VER)
            unsigned long r = 0;
            _BitScanReverse( &r, (unsigned long)val );
            return (unsigned)(r>>3);
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return (__builtin_clz((U32)val) >> 3);
#       else
            unsigned r;
            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
            r += (!val);
            return r;
#       endif
    }   }
}


/*! ZDICT_count() :
    Count the nb of common bytes between 2 pointers.
    Note : this function presumes end of buffer followed by noisy guard band.
*/
static size_t ZDICT_count(const void* pIn, const void* pMatch)
{
    const char* const pStart = (const char*)pIn;
    for (;;) {
        size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
        if (!diff) {
            pIn = (const char*)pIn+sizeof(size_t);
            pMatch = (const char*)pMatch+sizeof(size_t);
            continue;
        }
        pIn = (const char*)pIn+ZDICT_NbCommonBytes(diff);
        return (size_t)((const char*)pIn - pStart);
    }
}


typedef struct {
    U32 pos;
    U32 length;
    U32 savings;
} dictItem;
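
/* Convention for dictItem tables (see ZDICT_removeDictItem() and ZDICT_dictSize()) :
 * entry 0 is a header whose `pos` field stores the current number of elements;
 * useful entries start at index 1 and are kept sorted by decreasing `savings`. */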

static void ZDICT_initDictItem(dictItem* d)
{
    d->pos = 1;
    d->length = 0;
    d->savings = (U32)(-1);
}


#define LLIMIT 64    /* heuristic determined experimentally */
#define MINMATCHLENGTH 7   /* heuristic determined experimentally */
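
/* ZDICT_analyzePos() :
 * Inspects one position of the suffix array :
 * collects all suffixes sharing a prefix of length >= MINMATCHLENGTH with suffix[start],
 * iteratively extends the match length while at least `minRatio` suffixes remain,
 * estimates the resulting byte savings, and marks covered positions in `doneMarks`.
 * @return : a candidate dictItem (length==0 means no worthwhile segment at this position). */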
static dictItem ZDICT_analyzePos(
                       BYTE* doneMarks,
                       const int* suffix, U32 start,
                       const void* buffer, U32 minRatio, U32 notificationLevel)
{
    U32 lengthList[LLIMIT] = {0};
    U32 cumulLength[LLIMIT] = {0};
    U32 savings[LLIMIT] = {0};
    const BYTE* b = (const BYTE*)buffer;
    size_t maxLength = LLIMIT;
    size_t pos = suffix[start];
    U32 end = start;
    dictItem solution;

    /* init */
    memset(&solution, 0, sizeof(solution));
    doneMarks[pos] = 1;

    /* trivial repetition cases */
    if ( (MEM_read16(b+pos+0) == MEM_read16(b+pos+2))
       ||(MEM_read16(b+pos+1) == MEM_read16(b+pos+3))
       ||(MEM_read16(b+pos+2) == MEM_read16(b+pos+4)) ) {
        /* skip and mark segment */
        U16 const pattern16 = MEM_read16(b+pos+4);
        U32 u, patternEnd = 6;
        while (MEM_read16(b+pos+patternEnd) == pattern16) patternEnd+=2 ;
        if (b[pos+patternEnd] == b[pos+patternEnd-1]) patternEnd++;
        for (u=1; u<patternEnd; u++)
            doneMarks[pos+u] = 1;
        return solution;
    }

    /* look forward */
    {   size_t length;
        do {
            end++;
            length = ZDICT_count(b + pos, b + suffix[end]);
        } while (length >= MINMATCHLENGTH);
    }

    /* look backward */
    {   size_t length;
        do {
            length = ZDICT_count(b + pos, b + *(suffix+start-1));
            if (length >= MINMATCHLENGTH) start--;
        } while(length >= MINMATCHLENGTH);
    }

    /* exit if the minimum nb of repetitions was not reached */
    if (end-start < minRatio) {
        U32 idx;
        for(idx=start; idx<end; idx++)
            doneMarks[suffix[idx]] = 1;
        return solution;
    }

    {   int i;
        U32 mml;
        U32 refinedStart = start;
        U32 refinedEnd = end;

        DISPLAYLEVEL(4, "\n");
        DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u ", (unsigned)(end-start), MINMATCHLENGTH, (unsigned)pos);
        DISPLAYLEVEL(4, "\n");

        for (mml = MINMATCHLENGTH ; ; mml++) {
            BYTE currentChar = 0;
            U32 currentCount = 0;
            U32 currentID = refinedStart;
            U32 id;
            U32 selectedCount = 0;
            U32 selectedID = currentID;
            for (id =refinedStart; id < refinedEnd; id++) {
                if (b[suffix[id] + mml] != currentChar) {
                    if (currentCount > selectedCount) {
                        selectedCount = currentCount;
                        selectedID = currentID;
                    }
                    currentID = id;
                    currentChar = b[ suffix[id] + mml];
                    currentCount = 0;
                }
                currentCount ++;
            }
            if (currentCount > selectedCount) {  /* for last */
                selectedCount = currentCount;
                selectedID = currentID;
            }

            if (selectedCount < minRatio)
                break;
            refinedStart = selectedID;
            refinedEnd = refinedStart + selectedCount;
        }

        /* evaluate gain based on new dict */
        start = refinedStart;
        pos = suffix[refinedStart];
        end = start;
        memset(lengthList, 0, sizeof(lengthList));

        /* look forward */
        {   size_t length;
            do {
                end++;
                length = ZDICT_count(b + pos, b + suffix[end]);
                if (length >= LLIMIT) length = LLIMIT-1;
                lengthList[length]++;
            } while (length >=MINMATCHLENGTH);
        }

        /* look backward */
        {   size_t length = MINMATCHLENGTH;
            while ((length >= MINMATCHLENGTH) & (start > 0)) {
                length = ZDICT_count(b + pos, b + suffix[start - 1]);
                if (length >= LLIMIT) length = LLIMIT - 1;
                lengthList[length]++;
                if (length >= MINMATCHLENGTH) start--;
            }
        }

        /* largest useful length */
        memset(cumulLength, 0, sizeof(cumulLength));
        cumulLength[maxLength-1] = lengthList[maxLength-1];
        for (i=(int)(maxLength-2); i>=0; i--)
            cumulLength[i] = cumulLength[i+1] + lengthList[i];

        for (i=LLIMIT-1; i>=MINMATCHLENGTH; i--) if (cumulLength[i]>=minRatio) break;
        maxLength = i;

        /* reduce maxLength when the segment ends in repetitive data */
        {   U32 l = (U32)maxLength;
            BYTE const c = b[pos + maxLength-1];
            while (b[pos+l-2]==c) l--;
            maxLength = l;
        }
        if (maxLength < MINMATCHLENGTH) return solution;   /* skip : no long-enough solution */

        /* calculate savings */
        savings[5] = 0;
        for (i=MINMATCHLENGTH; i<=(int)maxLength; i++)
            savings[i] = savings[i-1] + (lengthList[i] * (i-3));

        DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f) \n",
                     (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / maxLength);

        solution.pos = (U32)pos;
        solution.length = (U32)maxLength;
        solution.savings = savings[maxLength];

        /* mark positions done */
        {   U32 id;
            for (id=start; id<end; id++) {
                U32 p, pEnd, length;
                U32 const testedPos = suffix[id];
                if (testedPos == pos)
                    length = solution.length;
                else {
                    length = (U32)ZDICT_count(b+pos, b+testedPos);
                    if (length > solution.length) length = solution.length;
                }
                pEnd = (U32)(testedPos + length);
                for (p=testedPos; p<pEnd; p++)
                    doneMarks[p] = 1;
    }   }   }

    return solution;
}


static int isIncluded(const void* in, const void* container, size_t length)
{
    const char* const ip = (const char*) in;
    const char* const into = (const char*) container;
    size_t u;

    for (u=0; u<length; u++) {  /* works because end of buffer is a noisy guard band */
        if (ip[u] != into[u]) break;
    }

    return u==length;
}

/*! ZDICT_tryMerge() :
    check if dictItem can be merged, do it if possible
    @return : id of destination elt, 0 if not merged
*/
static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const void* buffer)
{
    const U32 tableSize = table->pos;
    const U32 eltEnd = elt.pos + elt.length;
    const char* const buf = (const char*) buffer;

    /* tail overlap */
    U32 u; for (u=1; u<tableSize; u++) {
        if (u==eltNbToSkip) continue;
        if ((table[u].pos > elt.pos) && (table[u].pos <= eltEnd)) {  /* overlap, existing > new */
            /* append */
            U32 const addedLength = table[u].pos - elt.pos;
            table[u].length += addedLength;
            table[u].pos = elt.pos;
            table[u].savings += elt.savings * addedLength / elt.length;   /* rough approx */
            table[u].savings += elt.length / 8;    /* rough approx bonus */
            elt = table[u];
            /* sort : improve rank */
            while ((u>1) && (table[u-1].savings < elt.savings))
                table[u] = table[u-1], u--;
            table[u] = elt;
            return u;
    }   }

    /* front overlap */
    for (u=1; u<tableSize; u++) {
        if (u==eltNbToSkip) continue;

        if ((table[u].pos + table[u].length >= elt.pos) && (table[u].pos < elt.pos)) {  /* overlap, existing < new */
            /* append */
            int const addedLength = (int)eltEnd - (table[u].pos + table[u].length);
            table[u].savings += elt.length / 8;    /* rough approx bonus */
            if (addedLength > 0) {   /* otherwise, elt fully included into existing */
                table[u].length += addedLength;
                table[u].savings += elt.savings * addedLength / elt.length;   /* rough approx */
            }
            /* sort : improve rank */
            elt = table[u];
            while ((u>1) && (table[u-1].savings < elt.savings))
                table[u] = table[u-1], u--;
            table[u] = elt;
            return u;
        }

        if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) {
            if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) {
                size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 );
                table[u].pos = elt.pos;
                table[u].savings += (U32)(elt.savings * addedLength / elt.length);
                table[u].length = MIN(elt.length, table[u].length + 1);
                return u;
            }
        }
    }

    return 0;
}


static void ZDICT_removeDictItem(dictItem* table, U32 id)
{
    /* convention : table[0].pos stores nb of elts */
    U32 const max = table[0].pos;
    U32 u;
    if (!id) return;   /* protection, should never happen */
    for (u=id; u<max-1; u++)
        table[u] = table[u+1];
    table->pos--;
}


static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt, const void* buffer)
{
    /* merge if possible */
    U32 mergeId = ZDICT_tryMerge(table, elt, 0, buffer);
    if (mergeId) {
        U32 newMerge = 1;
        while (newMerge) {
            newMerge = ZDICT_tryMerge(table, table[mergeId], mergeId, buffer);
            if (newMerge) ZDICT_removeDictItem(table, mergeId);
            mergeId = newMerge;
        }
        return;
    }

    /* insert */
    {   U32 current;
        U32 nextElt = table->pos;
        if (nextElt >= maxSize) nextElt = maxSize-1;
        current = nextElt-1;
        while (table[current].savings < elt.savings) {
            table[current+1] = table[current];
            current--;
        }
        table[current+1] = elt;
        table->pos = nextElt+1;
    }
}


static U32 ZDICT_dictSize(const dictItem* dictList)
{
    U32 u, dictSize = 0;
    for (u=1; u<dictList[0].pos; u++)
        dictSize += dictList[u].length;
    return dictSize;
}


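/* ZDICT_trainBuffer_legacy() :
 * Core of the legacy trainer :
 * builds a suffix array over the concatenated samples with divsufsort(),
 * then scans positions, calling ZDICT_analyzePos() to extract candidate segments
 * and ZDICT_insertDictItem() to merge them into `dictList`.
 * @return : 0, or an error code, which can be tested with ZDICT_isError(). */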
static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
                            const void* const buffer, size_t bufferSize,   /* buffer must end with noisy guard band */
                            const size_t* fileSizes, unsigned nbFiles,
                            unsigned minRatio, U32 notificationLevel)
{
    int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0));
    int* const suffix = suffix0+1;
    U32* reverseSuffix = (U32*)malloc((bufferSize)*sizeof(*reverseSuffix));
    BYTE* doneMarks = (BYTE*)malloc((bufferSize+16)*sizeof(*doneMarks));   /* +16 for overflow security */
    U32* filePos = (U32*)malloc(nbFiles * sizeof(*filePos));
    size_t result = 0;
    clock_t displayClock = 0;
    clock_t const refreshRate = CLOCKS_PER_SEC * 3 / 10;

#   undef  DISPLAYUPDATE
#   define DISPLAYUPDATE(l, ...) if (notificationLevel>=l) { \
            if (ZDICT_clockSpan(displayClock) > refreshRate)  \
            { displayClock = clock(); DISPLAY(__VA_ARGS__); \
            if (notificationLevel>=4) fflush(stderr); } }

    /* init */
    DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
    if (!suffix0 || !reverseSuffix || !doneMarks || !filePos) {
        result = ERROR(memory_allocation);
        goto _cleanup;
    }
    if (minRatio < MINRATIO) minRatio = MINRATIO;
    memset(doneMarks, 0, bufferSize+16);

    /* limit sample set size (divsufsort limitation) */
    if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (unsigned)(ZDICT_MAX_SAMPLES_SIZE>>20));
    while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles];

    /* sort */
    DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (unsigned)(bufferSize>>20));
    {   int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0);
        if (divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; }
    }
    suffix[bufferSize] = (int)bufferSize;   /* leads into noise */
    suffix0[0] = (int)bufferSize;           /* leads into noise */
    /* build reverse suffix sort */
    {   size_t pos;
        for (pos=0; pos < bufferSize; pos++)
            reverseSuffix[suffix[pos]] = (U32)pos;
        /* note : filePos tracks borders between samples.
                  It's not used at this stage, but planned to become useful in a later update */
        filePos[0] = 0;
        for (pos=1; pos<nbFiles; pos++)
            filePos[pos] = (U32)(filePos[pos-1] + fileSizes[pos-1]);
    }

    DISPLAYLEVEL(2, "finding patterns ... \n");
    DISPLAYLEVEL(3, "minimum ratio : %u \n", minRatio);

    {   U32 cursor; for (cursor=0; cursor < bufferSize; ) {
            dictItem solution;
            if (doneMarks[cursor]) { cursor++; continue; }
            solution = ZDICT_analyzePos(doneMarks, suffix, reverseSuffix[cursor], buffer, minRatio, notificationLevel);
            if (solution.length==0) { cursor++; continue; }
            ZDICT_insertDictItem(dictList, dictListSize, solution, buffer);
            cursor += solution.length;
            DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / bufferSize * 100);
    }   }

_cleanup:
    free(suffix0);
    free(reverseSuffix);
    free(doneMarks);
    free(filePos);
    return result;
}


static void ZDICT_fillNoise(void* buffer, size_t length)
{
    unsigned const prime1 = 2654435761U;
    unsigned const prime2 = 2246822519U;
    unsigned acc = prime1;
    size_t p=0;
    for (p=0; p<length; p++) {
        acc *= prime2;
        ((unsigned char*)buffer)[p] = (unsigned char)(acc >> 21);
    }
}


typedef struct
{
    ZSTD_CDict* dict;    /* dictionary */
    ZSTD_CCtx* zc;       /* working context */
    void* workPlace;     /* must be ZSTD_BLOCKSIZE_MAX allocated */
} EStats_ress_t;

#define MAXREPOFFSET 1024

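/* ZDICT_countEStats() :
 * Compresses one sample (capped to a single block) with the candidate dictionary,
 * then accumulates the resulting literal, offset-code, match-length-code and
 * literal-length-code histograms, plus first repeat-offset statistics.
 * Errors are reported as warnings only : the sample is simply skipped. */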
static void ZDICT_countEStats(EStats_ress_t esr, const ZSTD_parameters* params,
                              unsigned* countLit, unsigned* offsetcodeCount, unsigned* matchlengthCount, unsigned* litlengthCount, U32* repOffsets,
                              const void* src, size_t srcSize,
                              U32 notificationLevel)
{
    size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params->cParams.windowLog);
    size_t cSize;

    if (srcSize > blockSizeMax) srcSize = blockSizeMax;   /* protection vs large samples */
    {   size_t const errorCode = ZSTD_compressBegin_usingCDict(esr.zc, esr.dict);
        if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_compressBegin_usingCDict failed \n"); return; }

    }
    cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
    if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; }

    if (cSize) {  /* if == 0; block is not compressible */
        const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);

        /* literals stats */
        {   const BYTE* bytePtr;
            for(bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++)
                countLit[*bytePtr]++;
        }

        /* seqStats */
        {   U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
            ZSTD_seqToCodes(seqStorePtr);

            {   const BYTE* codePtr = seqStorePtr->ofCode;
                U32 u;
                for (u=0; u<nbSeq; u++) offsetcodeCount[codePtr[u]]++;
            }

            {   const BYTE* codePtr = seqStorePtr->mlCode;
                U32 u;
                for (u=0; u<nbSeq; u++) matchlengthCount[codePtr[u]]++;
            }

            {   const BYTE* codePtr = seqStorePtr->llCode;
                U32 u;
                for (u=0; u<nbSeq; u++) litlengthCount[codePtr[u]]++;
            }

            if (nbSeq >= 2) { /* rep offsets */
                const seqDef* const seq = seqStorePtr->sequencesStart;
                U32 offset1 = seq[0].offset - 3;
                U32 offset2 = seq[1].offset - 3;
                if (offset1 >= MAXREPOFFSET) offset1 = 0;
                if (offset2 >= MAXREPOFFSET) offset2 = 0;
                repOffsets[offset1] += 3;
                repOffsets[offset2] += 1;
    }   }   }
}

static size_t ZDICT_totalSampleSize(const size_t* fileSizes, unsigned nbFiles)
{
    size_t total=0;
    unsigned u;
    for (u=0; u<nbFiles; u++) total += fileSizes[u];
    return total;
}

typedef struct { U32 offset; U32 count; } offsetCount_t;

static void ZDICT_insertSortCount(offsetCount_t table[ZSTD_REP_NUM+1], U32 val, U32 count)
{
    U32 u;
    table[ZSTD_REP_NUM].offset = val;
    table[ZSTD_REP_NUM].count = count;
    for (u=ZSTD_REP_NUM; u>0; u--) {
        offsetCount_t tmp;
        if (table[u-1].count >= table[u].count) break;
        tmp = table[u-1];
        table[u-1] = table[u];
        table[u] = tmp;
    }
}

/* ZDICT_flatLit() :
 * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals.
 * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode.
 */
static void ZDICT_flatLit(unsigned* countLit)
{
    int u;
    for (u=1; u<256; u++) countLit[u] = 2;
    countLit[0]   = 4;
    countLit[253] = 1;
    countLit[254] = 1;
}

#define OFFCODE_MAX 30  /* only applicable to first block */
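/* ZDICT_analyzeEntropy() :
 * Builds the entropy tables stored in the dictionary header :
 * collects statistics over all samples compressed against the candidate dictionary content,
 * derives the literals Huffman table and the offset / match-length / literal-length FSE tables,
 * writes them into `dstBuffer`, followed by the 3 starting repeat-offsets (12 bytes).
 * @return : size written into `dstBuffer`, or an error code. */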
static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
                                   int compressionLevel,
                                   const void* srcBuffer, const size_t* fileSizes, unsigned nbFiles,
                                   const void* dictBuffer, size_t dictBufferSize,
                                   unsigned notificationLevel)
{
    unsigned countLit[256];
    HUF_CREATE_STATIC_CTABLE(hufTable, 255);
    unsigned offcodeCount[OFFCODE_MAX+1];
    short offcodeNCount[OFFCODE_MAX+1];
    U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB));
    unsigned matchLengthCount[MaxML+1];
    short matchLengthNCount[MaxML+1];
    unsigned litLengthCount[MaxLL+1];
    short litLengthNCount[MaxLL+1];
    U32 repOffset[MAXREPOFFSET];
    offsetCount_t bestRepOffset[ZSTD_REP_NUM+1];
    EStats_ress_t esr = { NULL, NULL, NULL };
    ZSTD_parameters params;
    U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total;
    size_t pos = 0, errorCode;
    size_t eSize = 0;
    size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles);
    size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles);
    BYTE* dstPtr = (BYTE*)dstBuffer;

    /* init */
    DEBUGLOG(4, "ZDICT_analyzeEntropy");
    if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; }   /* too large dictionary */
    for (u=0; u<256; u++) countLit[u] = 1;   /* any character must be described */
    for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1;
    for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1;
    for (u=0; u<=MaxLL; u++) litLengthCount[u] = 1;
    memset(repOffset, 0, sizeof(repOffset));
    repOffset[1] = repOffset[4] = repOffset[8] = 1;
    memset(bestRepOffset, 0, sizeof(bestRepOffset));
    if (compressionLevel==0) compressionLevel = ZSTD_CLEVEL_DEFAULT;
    params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize);

    esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, params.cParams, ZSTD_defaultCMem);
    esr.zc = ZSTD_createCCtx();
    esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX);
    if (!esr.dict || !esr.zc || !esr.workPlace) {
        eSize = ERROR(memory_allocation);
        DISPLAYLEVEL(1, "Not enough memory \n");
        goto _cleanup;
    }

    /* collect stats on all samples */
    for (u=0; u<nbFiles; u++) {
        ZDICT_countEStats(esr, &params,
                          countLit, offcodeCount, matchLengthCount, litLengthCount, repOffset,
                          (const char*)srcBuffer + pos, fileSizes[u],
                          notificationLevel);
        pos += fileSizes[u];
    }

    /* analyze, build stats, starting with literals */
    {   size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
        if (HUF_isError(maxNbBits)) {
            eSize = maxNbBits;
            DISPLAYLEVEL(1, " HUF_buildCTable error \n");
            goto _cleanup;
        }
        if (maxNbBits==8) {  /* not compressible : will fail on HUF_writeCTable() */
            DISPLAYLEVEL(2, "warning : pathological dataset : literals are not compressible : samples are noisy or too regular \n");
            ZDICT_flatLit(countLit);  /* replace distribution by a fake "mostly flat but still compressible" distribution, that HUF_writeCTable() can encode */
            maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
            assert(maxNbBits==9);
        }
        huffLog = (U32)maxNbBits;
    }

    /* looking for most common first offsets */
    {   U32 offset;
        for (offset=1; offset<MAXREPOFFSET; offset++)
            ZDICT_insertSortCount(bestRepOffset, offset, repOffset[offset]);
    }
    /* note : the result of this phase should be used to better appreciate the impact on statistics */

    total=0; for (u=0; u<=offcodeMax; u++) total+=offcodeCount[u];
    errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax, /* useLowProbCount */ 1);
    if (FSE_isError(errorCode)) {
        eSize = errorCode;
        DISPLAYLEVEL(1, "FSE_normalizeCount error with offcodeCount \n");
        goto _cleanup;
    }
    Offlog = (U32)errorCode;

    total=0; for (u=0; u<=MaxML; u++) total+=matchLengthCount[u];
    errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML, /* useLowProbCount */ 1);
    if (FSE_isError(errorCode)) {
        eSize = errorCode;
        DISPLAYLEVEL(1, "FSE_normalizeCount error with matchLengthCount \n");
        goto _cleanup;
    }
    mlLog = (U32)errorCode;

    total=0; for (u=0; u<=MaxLL; u++) total+=litLengthCount[u];
    errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL, /* useLowProbCount */ 1);
    if (FSE_isError(errorCode)) {
        eSize = errorCode;
        DISPLAYLEVEL(1, "FSE_normalizeCount error with litLengthCount \n");
        goto _cleanup;
    }
    llLog = (U32)errorCode;

    /* write result to buffer */
    {   size_t const hhSize = HUF_writeCTable(dstPtr, maxDstSize, hufTable, 255, huffLog);
        if (HUF_isError(hhSize)) {
            eSize = hhSize;
            DISPLAYLEVEL(1, "HUF_writeCTable error \n");
            goto _cleanup;
        }
        dstPtr += hhSize;
        maxDstSize -= hhSize;
        eSize += hhSize;
    }

    {   size_t const ohSize = FSE_writeNCount(dstPtr, maxDstSize, offcodeNCount, OFFCODE_MAX, Offlog);
        if (FSE_isError(ohSize)) {
            eSize = ohSize;
            DISPLAYLEVEL(1, "FSE_writeNCount error with offcodeNCount \n");
            goto _cleanup;
        }
        dstPtr += ohSize;
        maxDstSize -= ohSize;
        eSize += ohSize;
    }

    {   size_t const mhSize = FSE_writeNCount(dstPtr, maxDstSize, matchLengthNCount, MaxML, mlLog);
        if (FSE_isError(mhSize)) {
            eSize = mhSize;
            DISPLAYLEVEL(1, "FSE_writeNCount error with matchLengthNCount \n");
            goto _cleanup;
        }
        dstPtr += mhSize;
        maxDstSize -= mhSize;
        eSize += mhSize;
    }

    {   size_t const lhSize = FSE_writeNCount(dstPtr, maxDstSize, litLengthNCount, MaxLL, llLog);
        if (FSE_isError(lhSize)) {
            eSize = lhSize;
            DISPLAYLEVEL(1, "FSE_writeNCount error with litlengthNCount \n");
            goto _cleanup;
        }
        dstPtr += lhSize;
        maxDstSize -= lhSize;
        eSize += lhSize;
    }

    if (maxDstSize<12) {
        eSize = ERROR(dstSize_tooSmall);
        DISPLAYLEVEL(1, "not enough space to write RepOffsets \n");
        goto _cleanup;
    }
# if 0
    MEM_writeLE32(dstPtr+0, bestRepOffset[0].offset);
    MEM_writeLE32(dstPtr+4, bestRepOffset[1].offset);
    MEM_writeLE32(dstPtr+8, bestRepOffset[2].offset);
#else
    /* at this stage, we don't use the result of "most common first offset",
     * as the impact of statistics is not properly evaluated */
    MEM_writeLE32(dstPtr+0, repStartValue[0]);
    MEM_writeLE32(dstPtr+4, repStartValue[1]);
    MEM_writeLE32(dstPtr+8, repStartValue[2]);
#endif
    eSize += 12;

_cleanup:
    ZSTD_freeCDict(esr.dict);
    ZSTD_freeCCtx(esr.zc);
    free(esr.workPlace);

    return eSize;
}


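/* Illustrative usage sketch (not part of the library) :
 * how a caller might finalize a raw-content dictionary with ZDICT_finalizeDictionary().
 * `rawContent`, `rawContentSize`, `samples`, `sampleSizes` and `nbSamples` are hypothetical
 * caller-owned data; error handling is reduced to a single check.
 *
 *     ZDICT_params_t zParams;
 *     memset(&zParams, 0, sizeof(zParams));   // 0 => default compression level, auto dictID
 *     {   size_t const dictSize = ZDICT_finalizeDictionary(
 *                 dictBuffer, dictBufferCapacity,
 *                 rawContent, rawContentSize,
 *                 samples, sampleSizes, nbSamples, zParams);
 *         if (ZDICT_isError(dictSize)) return 1;   // see ZDICT_getErrorName()
 *     }
 */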
size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
                          const void* customDictContent, size_t dictContentSize,
                          const void* samplesBuffer, const size_t* samplesSizes,
                          unsigned nbSamples, ZDICT_params_t params)
{
    size_t hSize;
#define HBUFFSIZE 256   /* should prove large enough for all entropy headers */
    BYTE header[HBUFFSIZE];
    int const compressionLevel = (params.compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : params.compressionLevel;
    U32 const notificationLevel = params.notificationLevel;

    /* check conditions */
    DEBUGLOG(4, "ZDICT_finalizeDictionary");
    if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall);
    if (dictContentSize < ZDICT_CONTENTSIZE_MIN) return ERROR(srcSize_wrong);
    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall);

    /* dictionary header */
    MEM_writeLE32(header, ZSTD_MAGIC_DICTIONARY);
    {   U64 const randomID = XXH64(customDictContent, dictContentSize, 0);
        U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
        U32 const dictID = params.dictID ? params.dictID : compliantID;
        MEM_writeLE32(header+4, dictID);
    }
    hSize = 8;

    /* entropy tables */
    DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
    DISPLAYLEVEL(2, "statistics ... \n");
    {   size_t const eSize = ZDICT_analyzeEntropy(header+hSize, HBUFFSIZE-hSize,
                                  compressionLevel,
                                  samplesBuffer, samplesSizes, nbSamples,
                                  customDictContent, dictContentSize,
                                  notificationLevel);
        if (ZDICT_isError(eSize)) return eSize;
        hSize += eSize;
    }

    /* copy elements in final buffer ; note : src and dst buffer can overlap */
    if (hSize + dictContentSize > dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize;
    {   size_t const dictSize = hSize + dictContentSize;
        char* dictEnd = (char*)dictBuffer + dictSize;
        memmove(dictEnd - dictContentSize, customDictContent, dictContentSize);
        memcpy(dictBuffer, header, hSize);
        return dictSize;
    }
}


static size_t ZDICT_addEntropyTablesFromBuffer_advanced(
        void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
        const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
        ZDICT_params_t params)
{
    int const compressionLevel = (params.compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : params.compressionLevel;
    U32 const notificationLevel = params.notificationLevel;
    size_t hSize = 8;

    /* calculate entropy tables */
    DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
    DISPLAYLEVEL(2, "statistics ... \n");
    {   size_t const eSize = ZDICT_analyzeEntropy((char*)dictBuffer+hSize, dictBufferCapacity-hSize,
                                  compressionLevel,
                                  samplesBuffer, samplesSizes, nbSamples,
                                  (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize,
                                  notificationLevel);
        if (ZDICT_isError(eSize)) return eSize;
        hSize += eSize;
    }

    /* add dictionary header (after entropy tables) */
    MEM_writeLE32(dictBuffer, ZSTD_MAGIC_DICTIONARY);
    {   U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0);
        U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
        U32 const dictID = params.dictID ? params.dictID : compliantID;
        MEM_writeLE32((char*)dictBuffer+4, dictID);
    }

    if (hSize + dictContentSize < dictBufferCapacity)
        memmove((char*)dictBuffer + hSize, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize);
    return MIN(dictBufferCapacity, hSize+dictContentSize);
}

/* Hidden declaration for dbio.c */
size_t ZDICT_trainFromBuffer_unsafe_legacy(
                            void* dictBuffer, size_t maxDictSize,
                            const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
                            ZDICT_legacy_params_t params);
/*! ZDICT_trainFromBuffer_unsafe_legacy() :
*   Warning : `samplesBuffer` must be followed by noisy guard band.
*   @return : size of dictionary, or an error code which can be tested with ZDICT_isError()
*/
size_t ZDICT_trainFromBuffer_unsafe_legacy(
                            void* dictBuffer, size_t maxDictSize,
                            const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
                            ZDICT_legacy_params_t params)
{
    U32 const dictListSize = MAX(MAX(DICTLISTSIZE_DEFAULT, nbSamples), (U32)(maxDictSize/16));
    dictItem* const dictList = (dictItem*)malloc(dictListSize * sizeof(*dictList));
    unsigned const selectivity = params.selectivityLevel == 0 ? g_selectivity_default : params.selectivityLevel;
    unsigned const minRep = (selectivity > 30) ? MINRATIO : nbSamples >> selectivity;
    size_t const targetDictSize = maxDictSize;
    size_t const samplesBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);
    size_t dictSize = 0;
    U32 const notificationLevel = params.zParams.notificationLevel;

    /* checks */
    if (!dictList) return ERROR(memory_allocation);
    if (maxDictSize < ZDICT_DICTSIZE_MIN) { free(dictList); return ERROR(dstSize_tooSmall); }   /* requested dictionary size is too small */
    if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return ERROR(dictionaryCreation_failed); }   /* not enough source to create dictionary */

    /* init */
    ZDICT_initDictItem(dictList);

    /* build dictionary */
    ZDICT_trainBuffer_legacy(dictList, dictListSize,
                             samplesBuffer, samplesBuffSize,
                             samplesSizes, nbSamples,
                             minRep, notificationLevel);

    /* display best matches */
    if (params.zParams.notificationLevel>= 3) {
        unsigned const nb = MIN(25, dictList[0].pos);
        unsigned const dictContentSize = ZDICT_dictSize(dictList);
        unsigned u;
        DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", (unsigned)dictList[0].pos-1, dictContentSize);
        DISPLAYLEVEL(3, "list %u best segments \n", nb-1);
        for (u=1; u<nb; u++) {
            unsigned const pos = dictList[u].pos;
            unsigned const length = dictList[u].length;
            U32 const printedLength = MIN(40, length);
            if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize)) {
                free(dictList);
                return ERROR(GENERIC);   /* should never happen */
            }
            DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |",
                         u, length, pos, (unsigned)dictList[u].savings);
            ZDICT_printHex((const char*)samplesBuffer+pos, printedLength);
            DISPLAYLEVEL(3, "| \n");
    }   }


    /* create dictionary */
    {   unsigned dictContentSize = ZDICT_dictSize(dictList);
        if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); }   /* dictionary content too small */
        if (dictContentSize < targetDictSize/4) {
            DISPLAYLEVEL(2, "! warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (unsigned)maxDictSize);
            if (samplesBuffSize < 10 * targetDictSize)
                DISPLAYLEVEL(2, "! consider increasing the number of samples (total size : %u MB)\n", (unsigned)(samplesBuffSize>>20));
            if (minRep > MINRATIO) {
                DISPLAYLEVEL(2, "! consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1);
                DISPLAYLEVEL(2, "! note : larger dictionaries are not necessarily better, test its efficiency on samples \n");
            }
        }

        if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) {
            unsigned proposedSelectivity = selectivity-1;
            while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; }
            DISPLAYLEVEL(2, "! note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (unsigned)maxDictSize);
            DISPLAYLEVEL(2, "! consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity);
            DISPLAYLEVEL(2, "! always test dictionary efficiency on real samples \n");
        }

        /* limit dictionary size */
        {   U32 const max = dictList->pos;   /* convention : nb of useful elts within dictList */
            U32 currentSize = 0;
            U32 n; for (n=1; n<max; n++) {
                currentSize += dictList[n].length;
                if (currentSize > targetDictSize) { currentSize -= dictList[n].length; break; }
            }
            dictList->pos = n;
            dictContentSize = currentSize;
        }

        /* build dict content */
        {   U32 u;
            BYTE* ptr = (BYTE*)dictBuffer + maxDictSize;
            for (u=1; u<dictList->pos; u++) {
                U32 l = dictList[u].length;
                ptr -= l;
                if (ptr<(BYTE*)dictBuffer) { free(dictList); return ERROR(GENERIC); }   /* should not happen */
                memcpy(ptr, (const char*)samplesBuffer+dictList[u].pos, l);
        }   }

        dictSize = ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, maxDictSize,
                                                             samplesBuffer, samplesSizes, nbSamples,
                                                             params.zParams);
    }

    /* clean up */
    free(dictList);
    return dictSize;
}


/* ZDICT_trainFromBuffer_legacy() :
 * issue : samplesBuffer needs to be followed by a noisy guard band.
 * work-around : duplicate the buffer, and add the noise */
size_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity,
                                    const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
                                    ZDICT_legacy_params_t params)
{
    size_t result;
    void* newBuff;
    size_t const sBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);
    if (sBuffSize < ZDICT_MIN_SAMPLES_SIZE) return 0;   /* not enough content => no dictionary */

    newBuff = malloc(sBuffSize + NOISELENGTH);
    if (!newBuff) return ERROR(memory_allocation);

    memcpy(newBuff, samplesBuffer, sBuffSize);
    ZDICT_fillNoise((char*)newBuff + sBuffSize, NOISELENGTH);   /* guard band, for end of buffer condition */

    result =
        ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, dictBufferCapacity, newBuff,
                                            samplesSizes, nbSamples, params);
    free(newBuff);
    return result;
}


size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
                             const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
{
    ZDICT_fastCover_params_t params;
    DEBUGLOG(3, "ZDICT_trainFromBuffer");
    memset(&params, 0, sizeof(params));
    params.d = 8;
    params.steps = 4;
    /* Use default level since no compression level information is available */
    params.zParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;
#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=1)
    params.zParams.notificationLevel = DEBUGLEVEL;
#endif
    return ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictBufferCapacity,
                                                   samplesBuffer, samplesSizes, nbSamples,
                                                   &params);
}

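/* Illustrative usage sketch (not part of the library) :
 * training a dictionary with ZDICT_trainFromBuffer() from samples stored back-to-back in one buffer.
 * `samplesBuffer`, `sampleSizes`, `nbSamples` and the 110 KB target are hypothetical example values;
 * error handling is reduced to a single check.
 *
 *     size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, 110 KB,
 *                                                   samplesBuffer, sampleSizes, nbSamples);
 *     if (ZDICT_isError(dictSize)) return 1;   // see ZDICT_getErrorName()
 */
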
size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
                                        const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
{
    ZDICT_params_t params;
    memset(&params, 0, sizeof(params));
    return ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, dictBufferCapacity,
                                                     samplesBuffer, samplesSizes, nbSamples,
                                                     params);
}