/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* *****************************************************************************
 * Constructs a dictionary using a heuristic based on the following paper:
 *
 * Liao, Petri, Moffat, Wirth
 * Effective Construction of Relative Lempel-Ziv Dictionaries
 * Published in WWW 2016.
 *
 * Adapted from code originally written by @ot (Giuseppe Ottaviano).
 ******************************************************************************/

/*-*************************************
*  Dependencies
***************************************/
/* qsort_r is an extension. */
#if defined(__linux) || defined(__linux__) || defined(linux) || defined(__gnu_linux__) || \
    defined(__CYGWIN__) || defined(__MSYS__)
#if !defined(_GNU_SOURCE) && !defined(__ANDROID__) /* NDK doesn't ship qsort_r(). */
#define _GNU_SOURCE
#endif
#endif

#include <stdio.h>  /* fprintf */
#include <stdlib.h> /* malloc, free, qsort_r */

#include <string.h> /* memset */
#include <time.h>   /* clock */

#ifndef ZDICT_STATIC_LINKING_ONLY
#  define ZDICT_STATIC_LINKING_ONLY
#endif

#include "../common/mem.h" /* read */
#include "../common/pool.h" /* POOL_ctx */
#include "../common/threading.h" /* ZSTD_pthread_mutex_t */
#include "../common/zstd_internal.h" /* includes zstd.h */
#include "../common/bits.h" /* ZSTD_highbit32 */
#include "../zdict.h"
#include "cover.h"

/*-*************************************
*  Constants
***************************************/
/**
* 32-bit indexes are used to reference samples, so the total samples size is
* limited to 4 GB on 64-bit builds.
* For 32-bit builds we choose 1 GB: most 32-bit platforms have only 2 GB of
* user-mode addressable space, and we allocate one large contiguous buffer,
* so 1 GB is already a high limit.
*/
#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
#define COVER_DEFAULT_SPLITPOINT 1.0

/*-*************************************
*  Console display
***************************************/
#ifndef LOCALDISPLAYLEVEL
static int g_displayLevel = 0;
#endif
#undef  DISPLAY
#define DISPLAY(...)                                                           \
  {                                                                            \
    fprintf(stderr, __VA_ARGS__);                                              \
    fflush(stderr);                                                            \
  }
#undef  LOCALDISPLAYLEVEL
#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \
  if (displayLevel >= l) {                                                     \
    DISPLAY(__VA_ARGS__);                                                      \
  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
#undef  DISPLAYLEVEL
#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)

#ifndef LOCALDISPLAYUPDATE
static const clock_t g_refreshRate = CLOCKS_PER_SEC * 15 / 100;
static clock_t g_time = 0;
#endif
#undef  LOCALDISPLAYUPDATE
#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               \
  if (displayLevel >= l) {                                                     \
    if ((clock() - g_time > g_refreshRate) || (displayLevel >= 4)) {           \
      g_time = clock();                                                        \
      DISPLAY(__VA_ARGS__);                                                    \
    }                                                                          \
  }
#undef  DISPLAYUPDATE
#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)

/*-*************************************
* Hash table
***************************************
* A small specialized hash map for storing activeDmers.
* The map does not resize, so if it becomes full it will loop forever.
* Thus, the map must be large enough to store every value.
* The map implements linear probing and keeps its load less than 0.5.
*/

#define MAP_EMPTY_VALUE ((U32)-1)
typedef struct COVER_map_pair_t_s {
  U32 key;
  U32 value;
} COVER_map_pair_t;

typedef struct COVER_map_s {
  COVER_map_pair_t *data;
  U32 sizeLog;
  U32 size;
  U32 sizeMask;
} COVER_map_t;

/**
 * Clear the map.
 */
static void COVER_map_clear(COVER_map_t *map) {
  memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));
}

/**
 * Initializes a map of the given size.
 * Returns 1 on success and 0 on failure.
 * The map must be destroyed with COVER_map_destroy().
 * The map is only guaranteed to be large enough to hold size elements.
 */
static int COVER_map_init(COVER_map_t *map, U32 size) {
  map->sizeLog = ZSTD_highbit32(size) + 2;
  map->size = (U32)1 << map->sizeLog;
  map->sizeMask = map->size - 1;
  map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));
  if (!map->data) {
    map->sizeLog = 0;
    map->size = 0;
    return 0;
  }
  COVER_map_clear(map);
  return 1;
}

/**
 * Internal hash function
 */
static const U32 COVER_prime4bytes = 2654435761U;
static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
  return (key * COVER_prime4bytes) >> (32 - map->sizeLog);
}

/**
 * Helper function that returns the index that a key should be placed into.
 */
static U32 COVER_map_index(COVER_map_t *map, U32 key) {
  const U32 hash = COVER_map_hash(map, key);
  U32 i;
  for (i = hash;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *pos = &map->data[i];
    if (pos->value == MAP_EMPTY_VALUE) {
      return i;
    }
    if (pos->key == key) {
      return i;
    }
  }
}

/**
 * Returns the pointer to the value for key.
 * If key is not in the map, it is inserted and the value is set to 0.
 * The map must not be full.
 */
static U32 *COVER_map_at(COVER_map_t *map, U32 key) {
  COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];
  if (pos->value == MAP_EMPTY_VALUE) {
    pos->key = key;
    pos->value = 0;
  }
  return &pos->value;
}

/**
 * Deletes key from the map if present.
 */
static void COVER_map_remove(COVER_map_t *map, U32 key) {
  U32 i = COVER_map_index(map, key);
  COVER_map_pair_t *del = &map->data[i];
  U32 shift = 1;
  if (del->value == MAP_EMPTY_VALUE) {
    return;
  }
  for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *const pos = &map->data[i];
    /* If the position is empty we are done */
    if (pos->value == MAP_EMPTY_VALUE) {
      del->value = MAP_EMPTY_VALUE;
      return;
    }
    /* If pos can be moved to del do so */
    if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {
      del->key = pos->key;
      del->value = pos->value;
      del = pos;
      shift = 1;
    } else {
      ++shift;
    }
  }
}

/**
 * Destroys a map that is initialized with COVER_map_init().
 */
static void COVER_map_destroy(COVER_map_t *map) {
  if (map->data) {
    free(map->data);
  }
  map->data = NULL;
  map->size = 0;
}
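
/* Illustrative usage of the map above (a sketch for exposition only, not part
 * of the build):
 *
 *   COVER_map_t map;
 *   if (COVER_map_init(&map, 1024)) {    // room for at least 1024 entries
 *     *COVER_map_at(&map, 42) += 1;      // inserts key 42 with value 0, then increments
 *     assert(*COVER_map_at(&map, 42) == 1);
 *     COVER_map_remove(&map, 42);        // backward-shift deletion keeps probes valid
 *     COVER_map_destroy(&map);
 *   }
 */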

/*-*************************************
* Context
***************************************/

typedef struct {
  const BYTE *samples;
  size_t *offsets;
  const size_t *samplesSizes;
  size_t nbSamples;
  size_t nbTrainSamples;
  size_t nbTestSamples;
  U32 *suffix;
  size_t suffixSize;
  U32 *freqs;
  U32 *dmerAt;
  unsigned d;
} COVER_ctx_t;

#if !defined(_GNU_SOURCE) && !defined(__APPLE__) && !defined(_MSC_VER)
/* C90 only offers qsort() that needs a global context. */
static COVER_ctx_t *g_coverCtx = NULL;
#endif

/*-*************************************
*  Helper functions
***************************************/

/**
 * Returns the sum of the sample sizes.
 */
size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
  size_t sum = 0;
  unsigned i;
  for (i = 0; i < nbSamples; ++i) {
    sum += samplesSizes[i];
  }
  return sum;
}

/**
 * Returns -1 if the dmer at lp is less than the dmer at rp.
 * Returns 0 if the dmers at lp and rp are equal.
 * Returns 1 if the dmer at lp is greater than the dmer at rp.
 */
static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  U32 const lhs = *(U32 const *)lp;
  U32 const rhs = *(U32 const *)rp;
  return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
}
/**
 * Faster version for d <= 8.
 */
static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1);
  U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask;
  U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask;
  if (lhs < rhs) {
    return -1;
  }
  return (lhs > rhs);
}

/**
 * Same as COVER_cmp() except ties are broken by pointer value
 */
#if (defined(_WIN32) && defined(_MSC_VER)) || defined(__APPLE__)
static int WIN_CDECL COVER_strict_cmp(void* g_coverCtx, const void* lp, const void* rp) {
#elif defined(_GNU_SOURCE)
static int COVER_strict_cmp(const void *lp, const void *rp, void *g_coverCtx) {
#else /* C90 fallback.*/
static int COVER_strict_cmp(const void *lp, const void *rp) {
#endif
  int result = COVER_cmp((COVER_ctx_t*)g_coverCtx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}
/**
 * Faster version for d <= 8.
 */
#if (defined(_WIN32) && defined(_MSC_VER)) || defined(__APPLE__)
static int WIN_CDECL COVER_strict_cmp8(void* g_coverCtx, const void* lp, const void* rp) {
#elif defined(_GNU_SOURCE)
static int COVER_strict_cmp8(const void *lp, const void *rp, void *g_coverCtx) {
#else /* C90 fallback.*/
static int COVER_strict_cmp8(const void *lp, const void *rp) {
#endif
  int result = COVER_cmp8((COVER_ctx_t*)g_coverCtx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}

/**
 * Abstract away the divergence of qsort_r() parameters.
 * Hopefully, when C11 becomes the norm, we will be able
 * to clean this up.
 */
static void stableSort(COVER_ctx_t *ctx) {
#if defined(__APPLE__)
    qsort_r(ctx->suffix, ctx->suffixSize, sizeof(U32),
            ctx,
            (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
#elif defined(_GNU_SOURCE)
    qsort_r(ctx->suffix, ctx->suffixSize, sizeof(U32),
            (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp),
            ctx);
#elif defined(_WIN32) && defined(_MSC_VER)
    qsort_s(ctx->suffix, ctx->suffixSize, sizeof(U32),
            (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp),
            ctx);
#elif defined(__OpenBSD__)
    g_coverCtx = ctx;
    mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32),
          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
#else /* C90 fallback.*/
    g_coverCtx = ctx;
    /* TODO(cavalcanti): implement a reentrant qsort() when one is not available. */
    qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
#endif
}

/**
 * Returns the first pointer in [first, last) whose element does not compare
 * less than value.  If no such element exists it returns last.
 */
static const size_t *COVER_lower_bound(const size_t* first, const size_t* last,
                                       size_t value) {
  size_t count = (size_t)(last - first);
  assert(last >= first);
  while (count != 0) {
    size_t step = count / 2;
    const size_t *ptr = first;
    ptr += step;
    if (*ptr < value) {
      first = ++ptr;
      count -= step + 1;
    } else {
      count = step;
    }
  }
  return first;
}
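
/* Worked example (illustrative): with first pointing at {10, 20, 30, 40} and
 * value == 25, COVER_lower_bound() returns a pointer to 30, the first element
 * not less than 25. COVER_group() below relies on this to find the end offset
 * of the sample containing a given dmer position.
 */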

/**
 * Generic groupBy function.
 * Groups an array sorted by cmp into groups with equivalent values.
 * Calls grp for each group.
 */
static void
COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,
              int (*cmp)(COVER_ctx_t *, const void *, const void *),
              void (*grp)(COVER_ctx_t *, const void *, const void *)) {
  const BYTE *ptr = (const BYTE *)data;
  size_t num = 0;
  while (num < count) {
    const BYTE *grpEnd = ptr + size;
    ++num;
    while (num < count && cmp(ctx, ptr, grpEnd) == 0) {
      grpEnd += size;
      ++num;
    }
    grp(ctx, ptr, grpEnd);
    ptr = grpEnd;
  }
}
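
/* Illustrative example: for a sorted array {1, 1, 2, 3, 3, 3} with a cmp that
 * compares the underlying values, COVER_groupBy() invokes grp() three times,
 * once per run of equal elements: [1, 1], [2], and [3, 3, 3].
 */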

/*-*************************************
*  Cover functions
***************************************/

/**
 * Called on each group of positions with the same dmer.
 * Counts the frequency of each dmer and saves it in the suffix array.
 * Fills `ctx->dmerAt`.
 */
static void COVER_group(COVER_ctx_t *ctx, const void *group,
                        const void *groupEnd) {
  /* The group consists of all the positions with the same first d bytes. */
  const U32 *grpPtr = (const U32 *)group;
  const U32 *grpEnd = (const U32 *)groupEnd;
  /* The dmerId is how we will reference this dmer.
   * This allows us to map the whole dmer space to a much smaller space, the
   * size of the suffix array.
   */
  const U32 dmerId = (U32)(grpPtr - ctx->suffix);
  /* Count the number of samples this dmer shows up in */
  U32 freq = 0;
  /* Details */
  const size_t *curOffsetPtr = ctx->offsets;
  const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;
  /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a
   * different sample than the last.
   */
  size_t curSampleEnd = ctx->offsets[0];
  for (; grpPtr != grpEnd; ++grpPtr) {
    /* Save the dmerId for this position so we can get back to it. */
    ctx->dmerAt[*grpPtr] = dmerId;
    /* Dictionaries only help for the first reference to the dmer.
     * After that zstd can reference the match from the previous reference.
     * So only count each dmer once for each sample it is in.
     */
    if (*grpPtr < curSampleEnd) {
      continue;
    }
    freq += 1;
    /* Binary search to find the end of the sample *grpPtr is in.
     * In the common case that grpPtr + 1 == grpEnd we can skip the binary
     * search because the loop is over.
     */
    if (grpPtr + 1 != grpEnd) {
      const size_t *sampleEndPtr =
          COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);
      curSampleEnd = *sampleEndPtr;
      curOffsetPtr = sampleEndPtr + 1;
    }
  }
  /* At this point we are never going to look at this segment of the suffix
   * array again.  We take advantage of this fact to save memory.
   * We store the frequency of the dmer in the first position of the group,
   * which is dmerId.
   */
  ctx->suffix[dmerId] = freq;
}
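
/* Illustrative example: suppose a dmer occurs at positions {3, 57, 58} and the
 * sample end offsets are {40, 80}. Position 3 falls in the sample ending at
 * 40, and positions 57 and 58 both fall in the sample ending at 80, so the
 * second occurrence at 58 is skipped and freq == 2: one count per sample
 * containing the dmer.
 */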


/**
 * Selects the best segment in an epoch.
 * Segments are scored according to the function:
 *
 * Let F(d) be the frequency of dmer d.
 * Let S_i be the dmer at position i of segment S which has length k.
 *
 *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
 *
 * Once the dmer d is in the dictionary we set F(d) = 0.
 */
static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
                                           COVER_map_t *activeDmers, U32 begin,
                                           U32 end,
                                           ZDICT_cover_params_t parameters) {
  /* Constants */
  const U32 k = parameters.k;
  const U32 d = parameters.d;
  const U32 dmersInK = k - d + 1;
  /* Try each segment (activeSegment) and save the best (bestSegment) */
  COVER_segment_t bestSegment = {0, 0, 0};
  COVER_segment_t activeSegment;
  /* Reset the activeDmers in the segment */
  COVER_map_clear(activeDmers);
  /* The activeSegment starts at the beginning of the epoch. */
  activeSegment.begin = begin;
  activeSegment.end = begin;
  activeSegment.score = 0;
  /* Slide the activeSegment through the whole epoch.
   * Save the best segment in bestSegment.
   */
  while (activeSegment.end < end) {
    /* The dmerId for the dmer at the next position */
    U32 newDmer = ctx->dmerAt[activeSegment.end];
    /* The entry in activeDmers for this dmerId */
    U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);
    /* If the dmer isn't already present in the segment add its score. */
    if (*newDmerOcc == 0) {
      /* The paper suggests using the L-0.5 norm, but experiments show that it
       * doesn't help.
       */
      activeSegment.score += freqs[newDmer];
    }
    /* Add the dmer to the segment */
    activeSegment.end += 1;
    *newDmerOcc += 1;

    /* If the window is now too large, drop the first position */
    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
      U32 delDmer = ctx->dmerAt[activeSegment.begin];
      U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
      activeSegment.begin += 1;
      *delDmerOcc -= 1;
      /* If this is the last occurrence of the dmer, subtract its score */
      if (*delDmerOcc == 0) {
        COVER_map_remove(activeDmers, delDmer);
        activeSegment.score -= freqs[delDmer];
      }
    }

    /* If this segment is the best so far save it */
    if (activeSegment.score > bestSegment.score) {
      bestSegment = activeSegment;
    }
  }
  {
    /* Trim off the zero frequency head and tail from the segment. */
    U32 newBegin = bestSegment.end;
    U32 newEnd = bestSegment.begin;
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      U32 freq = freqs[ctx->dmerAt[pos]];
      if (freq != 0) {
        newBegin = MIN(newBegin, pos);
        newEnd = pos + 1;
      }
    }
    bestSegment.begin = newBegin;
    bestSegment.end = newEnd;
  }
  {
    /* Zero out the frequency of each dmer covered by the chosen segment. */
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      freqs[ctx->dmerAt[pos]] = 0;
    }
  }
  return bestSegment;
}
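
/* Illustrative walk-through: with k = 4 and d = 2 (dmersInK = 3), distinct
 * dmers, and per-position frequencies {0, 5, 3, 0} in an epoch, the window
 * [0,3) scores 0+5+3 = 8; sliding to [1,4) scores 5+3+0 = 8, which is not
 * strictly better, so [0,3) wins and is then trimmed to [1,3) because the
 * dmer at position 0 has zero frequency.
 */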

/**
 * Check the validity of the parameters.
 * Returns non-zero if the parameters are valid and 0 otherwise.
 */
static int COVER_checkParameters(ZDICT_cover_params_t parameters,
                                 size_t maxDictSize) {
  /* k and d are required parameters */
  if (parameters.d == 0 || parameters.k == 0) {
    return 0;
  }
  /* k <= maxDictSize */
  if (parameters.k > maxDictSize) {
    return 0;
  }
  /* d <= k */
  if (parameters.d > parameters.k) {
    return 0;
  }
  /* 0 < splitPoint <= 1 */
  if (parameters.splitPoint <= 0 || parameters.splitPoint > 1){
    return 0;
  }
  return 1;
}
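
/* For example (illustrative): k = 1024, d = 8, splitPoint = 1.0 passes these
 * checks for any maxDictSize >= 1024, while d = 0, k > maxDictSize, d > k, or
 * a splitPoint outside (0, 1] is rejected.
 */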

/**
 * Clean up a context initialized with `COVER_ctx_init()`.
 */
static void COVER_ctx_destroy(COVER_ctx_t *ctx) {
  if (!ctx) {
    return;
  }
  if (ctx->suffix) {
    free(ctx->suffix);
    ctx->suffix = NULL;
  }
  if (ctx->freqs) {
    free(ctx->freqs);
    ctx->freqs = NULL;
  }
  if (ctx->dmerAt) {
    free(ctx->dmerAt);
    ctx->dmerAt = NULL;
  }
  if (ctx->offsets) {
    free(ctx->offsets);
    ctx->offsets = NULL;
  }
}

/**
 * Prepare a context for dictionary building.
 * The context is only dependent on the parameter `d` and can be used multiple
 * times.
 * Returns 0 on success or an error code on error.
 * The context must be destroyed with `COVER_ctx_destroy()`.
 */
static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
                          const size_t *samplesSizes, unsigned nbSamples,
                          unsigned d, double splitPoint)
{
  const BYTE *const samples = (const BYTE *)samplesBuffer;
  const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
  /* Split samples into testing and training sets */
  const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
  const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
  const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
  const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
  /* Checks */
  if (totalSamplesSize < MAX(d, sizeof(U64)) ||
      totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
    DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
                 (unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
    return ERROR(srcSize_wrong);
  }
  /* Check if there are at least 5 training samples */
  if (nbTrainSamples < 5) {
    DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples);
    return ERROR(srcSize_wrong);
  }
  /* Check if there is at least one testing sample */
  if (nbTestSamples < 1) {
    DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples);
    return ERROR(srcSize_wrong);
  }
  /* Zero the context */
  memset(ctx, 0, sizeof(*ctx));
  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
               (unsigned)trainingSamplesSize);
  DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
               (unsigned)testSamplesSize);
  ctx->samples = samples;
  ctx->samplesSizes = samplesSizes;
  ctx->nbSamples = nbSamples;
  ctx->nbTrainSamples = nbTrainSamples;
  ctx->nbTestSamples = nbTestSamples;
  /* Partial suffix array */
  ctx->suffixSize = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
  ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* Maps index to the dmerID */
  ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* The offsets of each file */
  ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t));
  if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
    DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
    COVER_ctx_destroy(ctx);
    return ERROR(memory_allocation);
  }
  ctx->freqs = NULL;
  ctx->d = d;

  /* Fill offsets from the samplesSizes */
  {
    U32 i;
    ctx->offsets[0] = 0;
    for (i = 1; i <= nbSamples; ++i) {
      ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
    }
  }
  DISPLAYLEVEL(2, "Constructing partial suffix array\n");
  {
    /* suffix is a partial suffix array.
     * It only sorts suffixes by their first parameters.d bytes.
     * The sort is stable, so each dmer group is sorted by position in input.
     */
    U32 i;
    for (i = 0; i < ctx->suffixSize; ++i) {
      ctx->suffix[i] = i;
    }
    stableSort(ctx);
  }
  DISPLAYLEVEL(2, "Computing frequencies\n");
  /* For each dmer group (group of positions with the same first d bytes):
   * 1. For each position we set dmerAt[position] = dmerID.  The dmerID is
   *    (groupBeginPtr - suffix).  This allows us to go from position to
   *    dmerID so we can look up values in freq.
   * 2. We calculate how many samples the dmer occurs in and save it in
   *    freqs[dmerId].
   */
  COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx,
                (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
  ctx->freqs = ctx->suffix;
  ctx->suffix = NULL;
  return 0;
}

void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel)
{
  const double ratio = (double)nbDmers / (double)maxDictSize;
  if (ratio >= 10) {
      return;
  }
  LOCALDISPLAYLEVEL(displayLevel, 1,
                    "WARNING: The maximum dictionary size %u is too large "
                    "compared to the source size %u! "
                    "size(source)/size(dictionary) = %f, but it should be >= "
                    "10! This may lead to a subpar dictionary! We recommend "
                    "training on sources at least 10x, and preferably 100x "
                    "the size of the dictionary! \n", (U32)maxDictSize,
                    (U32)nbDmers, ratio);
}

COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize,
                                       U32 nbDmers, U32 k, U32 passes)
{
  const U32 minEpochSize = k * 10;
  COVER_epoch_info_t epochs;
  epochs.num = MAX(1, maxDictSize / k / passes);
  epochs.size = nbDmers / epochs.num;
  if (epochs.size >= minEpochSize) {
      assert(epochs.size * epochs.num <= nbDmers);
      return epochs;
  }
  epochs.size = MIN(minEpochSize, nbDmers);
  epochs.num = nbDmers / epochs.size;
  assert(epochs.size * epochs.num <= nbDmers);
  return epochs;
}
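
/* Worked example (illustrative): maxDictSize = 102400 (100 KB), k = 1024 and
 * passes = 4 give epochs.num = MAX(1, 102400/1024/4) = 25. With nbDmers =
 * 1000000, each epoch covers 40000 dmers, above minEpochSize = 10240, so the
 * first branch returns.
 */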

/**
 * Given the prepared context build the dictionary.
 */
static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
                                    COVER_map_t *activeDmers, void *dictBuffer,
                                    size_t dictBufferCapacity,
                                    ZDICT_cover_params_t parameters) {
  BYTE *const dict = (BYTE *)dictBuffer;
  size_t tail = dictBufferCapacity;
  /* Divide the data into epochs. We will select one segment from each epoch. */
  const COVER_epoch_info_t epochs = COVER_computeEpochs(
      (U32)dictBufferCapacity, (U32)ctx->suffixSize, parameters.k, 4);
  const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3));
  size_t zeroScoreRun = 0;
  size_t epoch;
  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
                (U32)epochs.num, (U32)epochs.size);
  /* Loop through the epochs until there are no more segments or the dictionary
   * is full.
   */
  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
    const U32 epochBegin = (U32)(epoch * epochs.size);
    const U32 epochEnd = epochBegin + epochs.size;
    size_t segmentSize;
    /* Select a segment */
    COVER_segment_t segment = COVER_selectSegment(
        ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
    /* If the segment covers no dmers, then we are out of content.
     * There may be new content in other epochs, so continue for some time.
     */
    if (segment.score == 0) {
      if (++zeroScoreRun >= maxZeroScoreRun) {
          break;
      }
      continue;
    }
    zeroScoreRun = 0;
    /* Trim the segment if necessary and if it is too small then we are done */
    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
    if (segmentSize < parameters.d) {
      break;
    }
    /* We fill the dictionary from the back to allow the best segments to be
     * referenced with the smallest offsets.
     */
    tail -= segmentSize;
    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
    DISPLAYUPDATE(
        2, "\r%u%%       ",
        (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
  }
  DISPLAYLEVEL(2, "\r%79s\r", "");
  return tail;
}

ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_cover(
    void *dictBuffer, size_t dictBufferCapacity,
    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t parameters)
{
  BYTE* const dict = (BYTE*)dictBuffer;
  COVER_ctx_t ctx;
  COVER_map_t activeDmers;
  parameters.splitPoint = 1.0;
  /* Initialize global data */
  g_displayLevel = (int)parameters.zParams.notificationLevel;
  /* Checks */
  if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
    DISPLAYLEVEL(1, "Cover parameters incorrect\n");
    return ERROR(parameter_outOfBound);
  }
  if (nbSamples == 0) {
    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
    return ERROR(srcSize_wrong);
  }
  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                 ZDICT_DICTSIZE_MIN);
    return ERROR(dstSize_tooSmall);
  }
  /* Initialize context and activeDmers */
  {
    size_t const initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
                      parameters.d, parameters.splitPoint);
    if (ZSTD_isError(initVal)) {
      return initVal;
    }
  }
  COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, g_displayLevel);
  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
    COVER_ctx_destroy(&ctx);
    return ERROR(memory_allocation);
  }

  DISPLAYLEVEL(2, "Building dictionary\n");
  {
    const size_t tail =
        COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
                              dictBufferCapacity, parameters);
    const size_t dictionarySize = ZDICT_finalizeDictionary(
        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
        samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
    if (!ZSTD_isError(dictionarySize)) {
      DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
                   (unsigned)dictionarySize);
    }
    COVER_ctx_destroy(&ctx);
    COVER_map_destroy(&activeDmers);
    return dictionarySize;
  }
}



size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
                                    const size_t *samplesSizes, const BYTE *samples,
                                    size_t *offsets,
                                    size_t nbTrainSamples, size_t nbSamples,
                                    BYTE *const dict, size_t dictBufferCapacity) {
  size_t totalCompressedSize = ERROR(GENERIC);
  /* Pointers */
  ZSTD_CCtx *cctx;
  ZSTD_CDict *cdict;
  void *dst;
  /* Local variables */
  size_t dstCapacity;
  size_t i;
  /* Allocate dst with enough space to compress the maximum sized sample */
  {
    size_t maxSampleSize = 0;
    i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
    for (; i < nbSamples; ++i) {
      maxSampleSize = MAX(samplesSizes[i], maxSampleSize);
    }
    dstCapacity = ZSTD_compressBound(maxSampleSize);
    dst = malloc(dstCapacity);
  }
  /* Create the cctx and cdict */
  cctx = ZSTD_createCCtx();
  cdict = ZSTD_createCDict(dict, dictBufferCapacity,
                           parameters.zParams.compressionLevel);
  if (!dst || !cctx || !cdict) {
    goto _compressCleanup;
  }
  /* Compress each sample and sum their sizes (or error) */
  totalCompressedSize = dictBufferCapacity;
  i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
  for (; i < nbSamples; ++i) {
    const size_t size = ZSTD_compress_usingCDict(
        cctx, dst, dstCapacity, samples + offsets[i],
        samplesSizes[i], cdict);
    if (ZSTD_isError(size)) {
      totalCompressedSize = size;
      goto _compressCleanup;
    }
    totalCompressedSize += size;
  }
_compressCleanup:
  ZSTD_freeCCtx(cctx);
  ZSTD_freeCDict(cdict);
  if (dst) {
    free(dst);
  }
  return totalCompressedSize;
}


/**
 * Initialize the `COVER_best_t`.
 */
void COVER_best_init(COVER_best_t *best) {
  if (best==NULL) return; /* compatible with init on NULL */
  (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
  (void)ZSTD_pthread_cond_init(&best->cond, NULL);
  best->liveJobs = 0;
  best->dict = NULL;
  best->dictSize = 0;
  best->compressedSize = (size_t)-1;
  memset(&best->parameters, 0, sizeof(best->parameters));
}

/**
 * Wait until liveJobs == 0.
 */
void COVER_best_wait(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  while (best->liveJobs != 0) {
    ZSTD_pthread_cond_wait(&best->cond, &best->mutex);
  }
  ZSTD_pthread_mutex_unlock(&best->mutex);
}

/**
 * Call COVER_best_wait() and then destroy the COVER_best_t.
 */
void COVER_best_destroy(COVER_best_t *best) {
  if (!best) {
    return;
  }
  COVER_best_wait(best);
  if (best->dict) {
    free(best->dict);
  }
  ZSTD_pthread_mutex_destroy(&best->mutex);
  ZSTD_pthread_cond_destroy(&best->cond);
}

/**
 * Called when a thread is about to be launched.
 * Increments liveJobs.
 */
void COVER_best_start(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  ++best->liveJobs;
  ZSTD_pthread_mutex_unlock(&best->mutex);
}

/**
 * Called when a thread finishes executing, whether on error or success.
 * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
 * If this dictionary is the best so far save it and its parameters.
 */
void COVER_best_finish(COVER_best_t* best,
                      ZDICT_cover_params_t parameters,
                      COVER_dictSelection_t selection)
{
  void* dict = selection.dictContent;
  size_t compressedSize = selection.totalCompressedSize;
  size_t dictSize = selection.dictSize;
  if (!best) {
    return;
  }
  {
    size_t liveJobs;
    ZSTD_pthread_mutex_lock(&best->mutex);
    --best->liveJobs;
    liveJobs = best->liveJobs;
    /* If the new dictionary is better */
    if (compressedSize < best->compressedSize) {
      /* Allocate space if necessary */
      if (!best->dict || best->dictSize < dictSize) {
        if (best->dict) {
          free(best->dict);
        }
        best->dict = malloc(dictSize);
        if (!best->dict) {
          best->compressedSize = ERROR(GENERIC);
          best->dictSize = 0;
          ZSTD_pthread_cond_signal(&best->cond);
          ZSTD_pthread_mutex_unlock(&best->mutex);
          return;
        }
      }
      /* Save the dictionary, parameters, and size */
      if (dict) {
        memcpy(best->dict, dict, dictSize);
        best->dictSize = dictSize;
        best->parameters = parameters;
        best->compressedSize = compressedSize;
      }
    }
    if (liveJobs == 0) {
      ZSTD_pthread_cond_broadcast(&best->cond);
    }
    ZSTD_pthread_mutex_unlock(&best->mutex);
  }
}

static COVER_dictSelection_t setDictSelection(BYTE* buf, size_t s, size_t csz)
{
    COVER_dictSelection_t ds;
    ds.dictContent = buf;
    ds.dictSize = s;
    ds.totalCompressedSize = csz;
    return ds;
}

COVER_dictSelection_t COVER_dictSelectionError(size_t error) {
    return setDictSelection(NULL, 0, error);
}

unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) {
  return (ZSTD_isError(selection.totalCompressedSize) || !selection.dictContent);
}

void COVER_dictSelectionFree(COVER_dictSelection_t selection){
  free(selection.dictContent);
}

COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBufferCapacity,
        size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples,
        size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize) {

  size_t largestDict = 0;
  size_t largestCompressed = 0;
  BYTE* customDictContentEnd = customDictContent + dictContentSize;

  BYTE* largestDictbuffer = (BYTE*)malloc(dictBufferCapacity);
  BYTE* candidateDictBuffer = (BYTE*)malloc(dictBufferCapacity);
  double regressionTolerance = ((double)params.shrinkDictMaxRegression / 100.0) + 1.00;

  if (!largestDictbuffer || !candidateDictBuffer) {
    free(largestDictbuffer);
    free(candidateDictBuffer);
    return COVER_dictSelectionError(dictContentSize);
  }

  /* Initial dictionary size and compressed size */
  memcpy(largestDictbuffer, customDictContent, dictContentSize);
  dictContentSize = ZDICT_finalizeDictionary(
    largestDictbuffer, dictBufferCapacity, customDictContent, dictContentSize,
    samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);

  if (ZDICT_isError(dictContentSize)) {
    free(largestDictbuffer);
    free(candidateDictBuffer);
    return COVER_dictSelectionError(dictContentSize);
  }

  totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
                                                       samplesBuffer, offsets,
                                                       nbCheckSamples, nbSamples,
                                                       largestDictbuffer, dictContentSize);

  if (ZSTD_isError(totalCompressedSize)) {
    free(largestDictbuffer);
    free(candidateDictBuffer);
    return COVER_dictSelectionError(totalCompressedSize);
  }

  if (params.shrinkDict == 0) {
    free(candidateDictBuffer);
    return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize);
  }

  largestDict = dictContentSize;
  largestCompressed = totalCompressedSize;
  dictContentSize = ZDICT_DICTSIZE_MIN;

  /* Largest dict is initially at least ZDICT_DICTSIZE_MIN */
  while (dictContentSize < largestDict) {
    memcpy(candidateDictBuffer, largestDictbuffer, largestDict);
    dictContentSize = ZDICT_finalizeDictionary(
      candidateDictBuffer, dictBufferCapacity, customDictContentEnd - dictContentSize, dictContentSize,
      samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);

    if (ZDICT_isError(dictContentSize)) {
      free(largestDictbuffer);
      free(candidateDictBuffer);
      return COVER_dictSelectionError(dictContentSize);
    }

    totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
                                                         samplesBuffer, offsets,
                                                         nbCheckSamples, nbSamples,
                                                         candidateDictBuffer, dictContentSize);

    if (ZSTD_isError(totalCompressedSize)) {
      free(largestDictbuffer);
      free(candidateDictBuffer);
      return COVER_dictSelectionError(totalCompressedSize);
    }

    if ((double)totalCompressedSize <= (double)largestCompressed * regressionTolerance) {
      free(largestDictbuffer);
      return setDictSelection(candidateDictBuffer, dictContentSize, totalCompressedSize);
    }
    dictContentSize *= 2;
  }
  dictContentSize = largestDict;
  totalCompressedSize = largestCompressed;
  free(candidateDictBuffer);
  return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize);
}
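
/* Illustrative shrinking walk-through: with shrinkDict enabled and
 * shrinkDictMaxRegression = 10, regressionTolerance is 1.10, so candidates of
 * size ZDICT_DICTSIZE_MIN, then double that, and so on, are tried in turn;
 * the first candidate whose total compressed size stays within 10% of the
 * full dictionary's is returned, otherwise the full dictionary wins.
 */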

/**
 * Parameters for COVER_tryParameters().
 */
typedef struct COVER_tryParameters_data_s {
  const COVER_ctx_t *ctx;
  COVER_best_t *best;
  size_t dictBufferCapacity;
  ZDICT_cover_params_t parameters;
} COVER_tryParameters_data_t;

/**
 * Tries a set of parameters and updates the COVER_best_t with the results.
 * This function is thread safe if zstd is compiled with multithreaded support.
 * It takes its parameters as an *OWNING* opaque pointer to support threading.
 */
static void COVER_tryParameters(void *opaque)
{
  /* Save parameters as local variables */
  COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t*)opaque;
  const COVER_ctx_t *const ctx = data->ctx;
  const ZDICT_cover_params_t parameters = data->parameters;
  size_t dictBufferCapacity = data->dictBufferCapacity;
  size_t totalCompressedSize = ERROR(GENERIC);
  /* Allocate space for hash table, dict, and freqs */
  COVER_map_t activeDmers;
  BYTE* const dict = (BYTE*)malloc(dictBufferCapacity);
  COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));
  U32* const freqs = (U32*)malloc(ctx->suffixSize * sizeof(U32));
  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
    goto _cleanup;
  }
  if (!dict || !freqs) {
    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
    goto _cleanup;
  }
  /* Copy the frequencies because we need to modify them */
  memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
  /* Build the dictionary */
  {
    const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
                                              dictBufferCapacity, parameters);
    selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail,
        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,
        totalCompressedSize);

    if (COVER_dictSelectionIsError(selection)) {
      DISPLAYLEVEL(1, "Failed to select dictionary\n");
      goto _cleanup;
    }
  }
_cleanup:
  free(dict);
  COVER_best_finish(data->best, parameters, selection);
  free(data);
  COVER_map_destroy(&activeDmers);
  COVER_dictSelectionFree(selection);
  free(freqs);
}

ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_cover(
    void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer,
    const size_t* samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t* parameters)
{
  /* constants */
  const unsigned nbThreads = parameters->nbThreads;
  const double splitPoint =
      parameters->splitPoint <= 0.0 ? COVER_DEFAULT_SPLITPOINT : parameters->splitPoint;
  const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
  const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
  const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
  const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
  const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
  const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
  const unsigned kIterations =
      (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
  const unsigned shrinkDict = 0;
  /* Local variables */
  const int displayLevel = parameters->zParams.notificationLevel;
  unsigned iteration = 1;
  unsigned d;
  unsigned k;
  COVER_best_t best;
  POOL_ctx *pool = NULL;
  int warned = 0;

  /* Checks */
  if (splitPoint <= 0 || splitPoint > 1) {
    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
    return ERROR(parameter_outOfBound);
  }
  if (kMinK < kMaxD || kMaxK < kMinK) {
    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
    return ERROR(parameter_outOfBound);
  }
  if (nbSamples == 0) {
    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
    return ERROR(srcSize_wrong);
  }
  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                 ZDICT_DICTSIZE_MIN);
    return ERROR(dstSize_tooSmall);
  }
  if (nbThreads > 1) {
    pool = POOL_create(nbThreads, 1);
    if (!pool) {
      return ERROR(memory_allocation);
    }
  }
  /* Initialization */
  COVER_best_init(&best);
  /* Turn down global display level to clean up display at level 2 and below */
  g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
  /* Loop through d first because each new value needs a new context */
  LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
                    kIterations);
  for (d = kMinD; d <= kMaxD; d += 2) {
    /* Initialize the context for this value of d */
    COVER_ctx_t ctx;
    LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
    {
      const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint);
      if (ZSTD_isError(initVal)) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
        COVER_best_destroy(&best);
        POOL_free(pool);
        return initVal;
      }
    }
    if (!warned) {
      COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel);
      warned = 1;
    }
    /* Loop through k reusing the same context */
    for (k = kMinK; k <= kMaxK; k += kStepSize) {
      /* Prepare the arguments */
      COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(
          sizeof(COVER_tryParameters_data_t));
      LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
      if (!data) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
        COVER_best_destroy(&best);
        COVER_ctx_destroy(&ctx);
        POOL_free(pool);
        return ERROR(memory_allocation);
      }
      data->ctx = &ctx;
      data->best = &best;
      data->dictBufferCapacity = dictBufferCapacity;
      data->parameters = *parameters;
      data->parameters.k = k;
      data->parameters.d = d;
      data->parameters.splitPoint = splitPoint;
      data->parameters.steps = kSteps;
      data->parameters.shrinkDict = shrinkDict;
      data->parameters.zParams.notificationLevel = g_displayLevel;
      /* Check the parameters */
      if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
        DISPLAYLEVEL(1, "Cover parameters incorrect\n");
        free(data);
        continue;
      }
      /* Call the function and pass ownership of data to it */
      COVER_best_start(&best);
      if (pool) {
        POOL_add(pool, &COVER_tryParameters, data);
      } else {
        COVER_tryParameters(data);
      }
      /* Print status */
      LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%%       ",
                         (unsigned)((iteration * 100) / kIterations));
      ++iteration;
    }
    COVER_best_wait(&best);
    COVER_ctx_destroy(&ctx);
  }
  LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
  /* Fill the output buffer and parameters with output of the best parameters */
  {
    const size_t dictSize = best.dictSize;
    if (ZSTD_isError(best.compressedSize)) {
      const size_t compressedSize = best.compressedSize;
      COVER_best_destroy(&best);
      POOL_free(pool);
      return compressedSize;
    }
    *parameters = best.parameters;
    memcpy(dictBuffer, best.dict, dictSize);
    COVER_best_destroy(&best);
    POOL_free(pool);
    return dictSize;
  }
}