/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* *****************************************************************************
 * Constructs a dictionary using a heuristic based on the following paper:
 *
 * Liao, Petri, Moffat, Wirth
 * Effective Construction of Relative Lempel-Ziv Dictionaries
 * Published in WWW 2016.
 *
 * Adapted from code originally written by @ot (Giuseppe Ottaviano).
 ******************************************************************************/

/*-*************************************
*  Dependencies
***************************************/
#include <stdio.h>  /* fprintf */
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memset */
#include <time.h>   /* clock */

#ifndef ZDICT_STATIC_LINKING_ONLY
#  define ZDICT_STATIC_LINKING_ONLY
#endif

#include "../common/mem.h" /* read */
#include "../common/pool.h"
#include "../common/threading.h"
#include "../common/zstd_internal.h" /* includes zstd.h */
#include "../zdict.h"
#include "cover.h"

/*-*************************************
*  Constants
***************************************/
/**
* 32-bit indexes are used to reference samples, so limit the total samples size
* to 4 GB on 64-bit builds.
* For 32-bit builds we choose 1 GB.
* Most 32-bit platforms have 2 GB of user-mode addressable space, and we
* allocate a large contiguous buffer, so 1 GB is already a high limit.
*/
#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
#define COVER_DEFAULT_SPLITPOINT 1.0

/*-*************************************
*  Console display
***************************************/
#ifndef LOCALDISPLAYLEVEL
static int g_displayLevel = 0;
#endif
#undef  DISPLAY
#define DISPLAY(...)                                                           \
  {                                                                            \
    fprintf(stderr, __VA_ARGS__);                                              \
    fflush(stderr);                                                            \
  }
#undef  LOCALDISPLAYLEVEL
#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \
  if (displayLevel >= l) {                                                     \
    DISPLAY(__VA_ARGS__);                                                      \
  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
#undef  DISPLAYLEVEL
#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)

#ifndef LOCALDISPLAYUPDATE
static const clock_t g_refreshRate = CLOCKS_PER_SEC * 15 / 100;
static clock_t g_time = 0;
#endif
#undef  LOCALDISPLAYUPDATE
#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               \
  if (displayLevel >= l) {                                                     \
    if ((clock() - g_time > g_refreshRate) || (displayLevel >= 4)) {           \
      g_time = clock();                                                        \
      DISPLAY(__VA_ARGS__);                                                    \
    }                                                                          \
  }
#undef  DISPLAYUPDATE
#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)

/*-*************************************
* Hash table
***************************************
* A small specialized hash map for storing activeDmers.
* The map does not resize, so if it becomes full it will loop forever.
* Thus, the map must be large enough to store every value.
* The map implements linear probing and keeps its load less than 0.5.
*/
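
/* A short usage sketch of the map API defined below; the key and counter
 * values are arbitrary illustrative numbers:
 *
 *   COVER_map_t map;
 *   if (COVER_map_init(&map, 1024)) {         // room for at least 1024 keys
 *     U32 *value = COVER_map_at(&map, 42);    // inserts key 42 with value 0 if absent
 *     *value += 1;                            // map now holds 42 -> 1
 *     COVER_map_remove(&map, 42);             // delete key 42 (no-op if absent)
 *     COVER_map_destroy(&map);
 *   }
 */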

#define MAP_EMPTY_VALUE ((U32)-1)
typedef struct COVER_map_pair_t_s {
  U32 key;
  U32 value;
} COVER_map_pair_t;

typedef struct COVER_map_s {
  COVER_map_pair_t *data;
  U32 sizeLog;
  U32 size;
  U32 sizeMask;
} COVER_map_t;

/**
 * Clear the map.
 */
static void COVER_map_clear(COVER_map_t *map) {
  memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));
}

/**
 * Initializes a map of the given size.
 * Returns 1 on success and 0 on failure.
 * The map must be destroyed with COVER_map_destroy().
 * The map is only guaranteed to be large enough to hold size elements.
 */
static int COVER_map_init(COVER_map_t *map, U32 size) {
  map->sizeLog = ZSTD_highbit32(size) + 2;
  map->size = (U32)1 << map->sizeLog;
  map->sizeMask = map->size - 1;
  map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));
  if (!map->data) {
    map->sizeLog = 0;
    map->size = 0;
    return 0;
  }
  COVER_map_clear(map);
  return 1;
}

/**
 * Internal hash function
 */
static const U32 COVER_prime4bytes = 2654435761U;
static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
  return (key * COVER_prime4bytes) >> (32 - map->sizeLog);
}

/**
 * Helper function that returns the index that a key should be placed into.
 */
static U32 COVER_map_index(COVER_map_t *map, U32 key) {
  const U32 hash = COVER_map_hash(map, key);
  U32 i;
  for (i = hash;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *pos = &map->data[i];
    if (pos->value == MAP_EMPTY_VALUE) {
      return i;
    }
    if (pos->key == key) {
      return i;
    }
  }
}

/**
 * Returns the pointer to the value for key.
 * If key is not in the map, it is inserted and the value is set to 0.
 * The map must not be full.
 */
static U32 *COVER_map_at(COVER_map_t *map, U32 key) {
  COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];
  if (pos->value == MAP_EMPTY_VALUE) {
    pos->key = key;
    pos->value = 0;
  }
  return &pos->value;
}

/**
 * Deletes key from the map if present.
 */
static void COVER_map_remove(COVER_map_t *map, U32 key) {
  U32 i = COVER_map_index(map, key);
  COVER_map_pair_t *del = &map->data[i];
  U32 shift = 1;
  if (del->value == MAP_EMPTY_VALUE) {
    return;
  }
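  /* Backward-shift deletion for linear probing: scan forward from the vacated
   * slot and pull back any entry whose probe distance from its home slot
   * (its hash) is at least `shift`, so moving it into `del` never places it
   * before its home slot.  Stop at the first empty slot.
   */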
  for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *const pos = &map->data[i];
    /* If the position is empty we are done */
    if (pos->value == MAP_EMPTY_VALUE) {
      del->value = MAP_EMPTY_VALUE;
      return;
    }
    /* If pos can be moved to del do so */
    if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {
      del->key = pos->key;
      del->value = pos->value;
      del = pos;
      shift = 1;
    } else {
      ++shift;
    }
  }
}

/**
 * Destroys a map that is inited with COVER_map_init().
 */
static void COVER_map_destroy(COVER_map_t *map) {
  if (map->data) {
    free(map->data);
  }
  map->data = NULL;
  map->size = 0;
}

/*-*************************************
* Context
***************************************/

typedef struct {
  const BYTE *samples;         /* concatenated samples buffer */
  size_t *offsets;             /* offsets[i] = start of sample i; offsets[nbSamples] = total size */
  const size_t *samplesSizes;  /* size of each sample */
  size_t nbSamples;            /* total number of samples */
  size_t nbTrainSamples;       /* number of samples used for training */
  size_t nbTestSamples;        /* number of samples used for testing */
  U32 *suffix;                 /* partial suffix array over the training data */
  size_t suffixSize;           /* number of dmers in the training data */
  U32 *freqs;                  /* freqs[dmerId] = number of samples containing the dmer (reuses suffix storage) */
  U32 *dmerAt;                 /* dmerAt[position] = dmerId of the dmer starting at position */
  unsigned d;                  /* dmer length */
} COVER_ctx_t;

/* We need a global context for qsort... */
static COVER_ctx_t *g_coverCtx = NULL;

/*-*************************************
*  Helper functions
***************************************/

/**
 * Returns the sum of the sample sizes.
 */
size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
  size_t sum = 0;
  unsigned i;
  for (i = 0; i < nbSamples; ++i) {
    sum += samplesSizes[i];
  }
  return sum;
}

/**
 * Returns -1 if the dmer at lp is less than the dmer at rp.
 * Returns 0 if the dmers at lp and rp are equal.
 * Returns 1 if the dmer at lp is greater than the dmer at rp.
 */
static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  U32 const lhs = *(U32 const *)lp;
  U32 const rhs = *(U32 const *)rp;
  return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
}
/**
 * Faster version for d <= 8.
 */
static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  /* The mask keeps only the low d bytes of the little-endian read, so only the
   * first d bytes of each dmer take part in the comparison. */
  U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1);
  U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask;
  U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask;
  if (lhs < rhs) {
    return -1;
  }
  return (lhs > rhs);
}

/**
 * Same as COVER_cmp() except ties are broken by pointer value
 * NOTE: g_coverCtx must be set to call this function.  A global is required because
 * qsort doesn't take an opaque pointer.
 */
static int WIN_CDECL COVER_strict_cmp(const void *lp, const void *rp) {
  int result = COVER_cmp(g_coverCtx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}
/**
 * Faster version for d <= 8.
 */
static int WIN_CDECL COVER_strict_cmp8(const void *lp, const void *rp) {
  int result = COVER_cmp8(g_coverCtx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}

/**
 * Returns the first pointer in [first, last) whose element does not compare
 * less than value.  If no such element exists it returns last.
 */
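/* For example, with the range {2, 5, 9} and value = 5 the function returns a
 * pointer to the 5; with value = 10 it returns last.
 */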
static const size_t *COVER_lower_bound(const size_t *first, const size_t *last,
                                       size_t value) {
  size_t count = last - first;
  while (count != 0) {
    size_t step = count / 2;
    const size_t *ptr = first;
    ptr += step;
    if (*ptr < value) {
      first = ++ptr;
      count -= step + 1;
    } else {
      count = step;
    }
  }
  return first;
}

/**
 * Generic groupBy function.
 * Groups an array sorted by cmp into groups with equivalent values.
 * Calls grp for each group.
 */
static void
COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,
              int (*cmp)(COVER_ctx_t *, const void *, const void *),
              void (*grp)(COVER_ctx_t *, const void *, const void *)) {
  const BYTE *ptr = (const BYTE *)data;
  size_t num = 0;
  while (num < count) {
    const BYTE *grpEnd = ptr + size;
    ++num;
    while (num < count && cmp(ctx, ptr, grpEnd) == 0) {
      grpEnd += size;
      ++num;
    }
    grp(ctx, ptr, grpEnd);
    ptr = grpEnd;
  }
}

/*-*************************************
*  Cover functions
***************************************/

/**
 * Called on each group of positions with the same dmer.
 * Counts the frequency of each dmer and saves it in the suffix array.
 * Fills `ctx->dmerAt`.
 */
static void COVER_group(COVER_ctx_t *ctx, const void *group,
                        const void *groupEnd) {
  /* The group consists of all the positions with the same first d bytes. */
  const U32 *grpPtr = (const U32 *)group;
  const U32 *grpEnd = (const U32 *)groupEnd;
  /* The dmerId is how we will reference this dmer.
   * This allows us to map the whole dmer space to a much smaller space, the
   * size of the suffix array.
   */
  const U32 dmerId = (U32)(grpPtr - ctx->suffix);
  /* Count the number of samples this dmer shows up in */
  U32 freq = 0;
  /* Details */
  const size_t *curOffsetPtr = ctx->offsets;
  const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;
  /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a
   * different sample than the last.
   */
  size_t curSampleEnd = ctx->offsets[0];
  for (; grpPtr != grpEnd; ++grpPtr) {
    /* Save the dmerId for this position so we can get back to it. */
    ctx->dmerAt[*grpPtr] = dmerId;
    /* Dictionaries only help for the first reference to the dmer.
     * After that zstd can reference the match from the previous reference.
     * So only count each dmer once for each sample it is in.
     */
    if (*grpPtr < curSampleEnd) {
      continue;
    }
    freq += 1;
    /* Binary search to find the end of the sample *grpPtr is in.
     * In the common case that grpPtr + 1 == grpEnd we can skip the binary
     * search because the loop is over.
     */
    if (grpPtr + 1 != grpEnd) {
      const size_t *sampleEndPtr =
          COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);
      curSampleEnd = *sampleEndPtr;
      curOffsetPtr = sampleEndPtr + 1;
    }
  }
  /* At this point we are never going to look at this segment of the suffix
   * array again.  We take advantage of this fact to save memory.
   * We store the frequency of the dmer in the first position of the group,
   * which is dmerId.
   */
  ctx->suffix[dmerId] = freq;
}


/**
 * Selects the best segment in an epoch.
 * Segments are scored according to the function:
 *
 * Let F(d) be the frequency of dmer d.
 * Let S_i be the dmer at position i of segment S which has length k.
 *
 *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
 *
 * Once the dmer d is in the dictionary we set F(d) = 0.
 */
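/* Worked example (illustrative numbers): with k = 4 and d = 2 a segment spans
 * k - d + 1 = 3 dmers.  If those dmers are X, Y, X with F(X) = 5 and F(Y) = 3,
 * the implementation below scores the segment 5 + 3 = 8: a dmer that repeats
 * inside the segment contributes its frequency only once, because the
 * activeDmers map tracks which dmers are already present.
 */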
static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
                                           COVER_map_t *activeDmers, U32 begin,
                                           U32 end,
                                           ZDICT_cover_params_t parameters) {
  /* Constants */
  const U32 k = parameters.k;
  const U32 d = parameters.d;
  const U32 dmersInK = k - d + 1;
  /* Try each segment (activeSegment) and save the best (bestSegment) */
  COVER_segment_t bestSegment = {0, 0, 0};
  COVER_segment_t activeSegment;
  /* Reset the activeDmers in the segment */
  COVER_map_clear(activeDmers);
  /* The activeSegment starts at the beginning of the epoch. */
  activeSegment.begin = begin;
  activeSegment.end = begin;
  activeSegment.score = 0;
  /* Slide the activeSegment through the whole epoch.
   * Save the best segment in bestSegment.
   */
  while (activeSegment.end < end) {
    /* The dmerId for the dmer at the next position */
    U32 newDmer = ctx->dmerAt[activeSegment.end];
    /* The entry in activeDmers for this dmerId */
    U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);
    /* If the dmer isn't already present in the segment add its score. */
    if (*newDmerOcc == 0) {
      /* The paper suggests using the L-0.5 norm, but experiments show that it
       * doesn't help.
       */
      activeSegment.score += freqs[newDmer];
    }
    /* Add the dmer to the segment */
    activeSegment.end += 1;
    *newDmerOcc += 1;

    /* If the window is now too large, drop the first position */
    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
      U32 delDmer = ctx->dmerAt[activeSegment.begin];
      U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
      activeSegment.begin += 1;
      *delDmerOcc -= 1;
      /* If this is the last occurrence of the dmer, subtract its score */
      if (*delDmerOcc == 0) {
        COVER_map_remove(activeDmers, delDmer);
        activeSegment.score -= freqs[delDmer];
      }
    }

    /* If this segment is the best so far save it */
    if (activeSegment.score > bestSegment.score) {
      bestSegment = activeSegment;
    }
  }
  {
    /* Trim off the zero frequency head and tail from the segment. */
    U32 newBegin = bestSegment.end;
    U32 newEnd = bestSegment.begin;
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      U32 freq = freqs[ctx->dmerAt[pos]];
      if (freq != 0) {
        newBegin = MIN(newBegin, pos);
        newEnd = pos + 1;
      }
    }
    bestSegment.begin = newBegin;
    bestSegment.end = newEnd;
  }
  {
    /* Zero out the frequency of each dmer covered by the chosen segment. */
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      freqs[ctx->dmerAt[pos]] = 0;
    }
  }
  return bestSegment;
}

/**
 * Check the validity of the parameters.
 * Returns non-zero if the parameters are valid and 0 otherwise.
 */
static int COVER_checkParameters(ZDICT_cover_params_t parameters,
                                 size_t maxDictSize) {
  /* k and d are required parameters */
  if (parameters.d == 0 || parameters.k == 0) {
    return 0;
  }
  /* k <= maxDictSize */
  if (parameters.k > maxDictSize) {
    return 0;
  }
  /* d <= k */
  if (parameters.d > parameters.k) {
    return 0;
  }
  /* 0 < splitPoint <= 1 */
  if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) {
    return 0;
  }
  return 1;
}

/**
 * Clean up a context initialized with `COVER_ctx_init()`.
 */
static void COVER_ctx_destroy(COVER_ctx_t *ctx) {
  if (!ctx) {
    return;
  }
  if (ctx->suffix) {
    free(ctx->suffix);
    ctx->suffix = NULL;
  }
  if (ctx->freqs) {
    free(ctx->freqs);
    ctx->freqs = NULL;
  }
  if (ctx->dmerAt) {
    free(ctx->dmerAt);
    ctx->dmerAt = NULL;
  }
  if (ctx->offsets) {
    free(ctx->offsets);
    ctx->offsets = NULL;
  }
}
/**
 * Prepare a context for dictionary building.
 * The context is only dependent on the parameter `d` and can be used multiple
 * times.
 * Returns 0 on success or an error code on error.
 * The context must be destroyed with `COVER_ctx_destroy()`.
 */
static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
                          const size_t *samplesSizes, unsigned nbSamples,
                          unsigned d, double splitPoint) {
  const BYTE *const samples = (const BYTE *)samplesBuffer;
  const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
  /* Split samples into testing and training sets */
  const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
  const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
  const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
  const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
  /* Checks */
  if (totalSamplesSize < MAX(d, sizeof(U64)) ||
      totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
    DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
                 (unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
    return ERROR(srcSize_wrong);
  }
  /* Check if there are at least 5 training samples */
  if (nbTrainSamples < 5) {
    DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples);
    return ERROR(srcSize_wrong);
  }
  /* Check if there's at least one testing sample */
  if (nbTestSamples < 1) {
    DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples);
    return ERROR(srcSize_wrong);
  }
  /* Zero the context */
  memset(ctx, 0, sizeof(*ctx));
  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
               (unsigned)trainingSamplesSize);
  DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
               (unsigned)testSamplesSize);
  ctx->samples = samples;
  ctx->samplesSizes = samplesSizes;
  ctx->nbSamples = nbSamples;
  ctx->nbTrainSamples = nbTrainSamples;
  ctx->nbTestSamples = nbTestSamples;
  /* Partial suffix array */
  ctx->suffixSize = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
  ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* Maps index to the dmerID */
  ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* The offsets of each file */
  ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t));
  if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
    DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
    COVER_ctx_destroy(ctx);
    return ERROR(memory_allocation);
  }
  ctx->freqs = NULL;
  ctx->d = d;

  /* Fill offsets from the samplesSizes */
  {
    U32 i;
    ctx->offsets[0] = 0;
    for (i = 1; i <= nbSamples; ++i) {
      ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
    }
  }
  DISPLAYLEVEL(2, "Constructing partial suffix array\n");
  {
    /* suffix is a partial suffix array.
     * It only sorts suffixes by their first parameters.d bytes.
     * The sort is stable, so each dmer group is sorted by position in input.
     */
    U32 i;
    for (i = 0; i < ctx->suffixSize; ++i) {
      ctx->suffix[i] = i;
    }
    /* qsort doesn't take an opaque pointer, so pass as a global.
     * On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is.
     */
    g_coverCtx = ctx;
#if defined(__OpenBSD__)
    mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32),
          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
#else
    qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
#endif
  }
  DISPLAYLEVEL(2, "Computing frequencies\n");
  /* For each dmer group (group of positions with the same first d bytes):
   * 1. For each position we set dmerAt[position] = dmerID.  The dmerID is
   *    (groupBeginPtr - suffix).  This allows us to go from position to
   *    dmerID so we can look up values in freq.
   * 2. We calculate how many samples the dmer occurs in and save it in
   *    freqs[dmerId].
   */
  COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx,
                (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
  ctx->freqs = ctx->suffix;
  ctx->suffix = NULL;
  return 0;
}

void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel)
{
  const double ratio = (double)nbDmers / maxDictSize;
  if (ratio >= 10) {
      return;
  }
  LOCALDISPLAYLEVEL(displayLevel, 1,
                    "WARNING: The maximum dictionary size %u is too large "
                    "compared to the source size %u! "
                    "size(source)/size(dictionary) = %f, but it should be >= "
                    "10! This may lead to a subpar dictionary! We recommend "
                    "training on sources at least 10x, and preferably 100x "
                    "the size of the dictionary! \n", (U32)maxDictSize,
                    (U32)nbDmers, ratio);
}

COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize,
                                       U32 nbDmers, U32 k, U32 passes)
{
  const U32 minEpochSize = k * 10;
  COVER_epoch_info_t epochs;
  epochs.num = MAX(1, maxDictSize / k / passes);
  epochs.size = nbDmers / epochs.num;
  if (epochs.size >= minEpochSize) {
      assert(epochs.size * epochs.num <= nbDmers);
      return epochs;
  }
  epochs.size = MIN(minEpochSize, nbDmers);
  epochs.num = nbDmers / epochs.size;
  assert(epochs.size * epochs.num <= nbDmers);
  return epochs;
}
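
/* Worked example (illustrative numbers): with maxDictSize = 100000, k = 1000,
 * passes = 4 and nbDmers = 1000000, num = MAX(1, 100000 / 1000 / 4) = 25 and
 * size = 1000000 / 25 = 40000, which is >= minEpochSize = 10000, so the result
 * is {25, 40000}.  If instead nbDmers = 100000, size would be 4000 < 10000, so
 * size is clamped to 10000 and num becomes 100000 / 10000 = 10.
 */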

/**
 * Given the prepared context build the dictionary.
 */
static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
                                    COVER_map_t *activeDmers, void *dictBuffer,
                                    size_t dictBufferCapacity,
                                    ZDICT_cover_params_t parameters) {
  BYTE *const dict = (BYTE *)dictBuffer;
  size_t tail = dictBufferCapacity;
  /* Divide the data into epochs. We will select one segment from each epoch. */
  const COVER_epoch_info_t epochs = COVER_computeEpochs(
      (U32)dictBufferCapacity, (U32)ctx->suffixSize, parameters.k, 4);
  const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3));
  size_t zeroScoreRun = 0;
  size_t epoch;
  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
                (U32)epochs.num, (U32)epochs.size);
  /* Loop through the epochs until there are no more segments or the dictionary
   * is full.
   */
  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
    const U32 epochBegin = (U32)(epoch * epochs.size);
    const U32 epochEnd = epochBegin + epochs.size;
    size_t segmentSize;
    /* Select a segment */
    COVER_segment_t segment = COVER_selectSegment(
        ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
    /* If the segment covers no dmers, then we are out of content.
     * There may be new content in other epochs, so continue for some time.
     */
    if (segment.score == 0) {
      if (++zeroScoreRun >= maxZeroScoreRun) {
          break;
      }
      continue;
    }
    zeroScoreRun = 0;
    /* Trim the segment if necessary and if it is too small then we are done */
    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
    if (segmentSize < parameters.d) {
      break;
    }
    /* We fill the dictionary from the back to allow the best segments to be
     * referenced with the smallest offsets.
     */
    tail -= segmentSize;
    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
    DISPLAYUPDATE(
        2, "\r%u%%       ",
        (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
  }
  DISPLAYLEVEL(2, "\r%79s\r", "");
  return tail;
}

ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
    void *dictBuffer, size_t dictBufferCapacity,
    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t parameters)
{
  BYTE* const dict = (BYTE*)dictBuffer;
  COVER_ctx_t ctx;
  COVER_map_t activeDmers;
  parameters.splitPoint = 1.0;
  /* Initialize global data */
  g_displayLevel = (int)parameters.zParams.notificationLevel;
  /* Checks */
  if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
    DISPLAYLEVEL(1, "Cover parameters incorrect\n");
    return ERROR(parameter_outOfBound);
  }
  if (nbSamples == 0) {
    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
    return ERROR(srcSize_wrong);
  }
  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                 ZDICT_DICTSIZE_MIN);
    return ERROR(dstSize_tooSmall);
  }
  /* Initialize context and activeDmers */
  {
    size_t const initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
                      parameters.d, parameters.splitPoint);
    if (ZSTD_isError(initVal)) {
      return initVal;
    }
  }
  COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, g_displayLevel);
  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
    COVER_ctx_destroy(&ctx);
    return ERROR(memory_allocation);
  }

  DISPLAYLEVEL(2, "Building dictionary\n");
  {
    const size_t tail =
        COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
                              dictBufferCapacity, parameters);
    const size_t dictionarySize = ZDICT_finalizeDictionary(
        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
        samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
    if (!ZSTD_isError(dictionarySize)) {
      DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
                   (unsigned)dictionarySize);
    }
    COVER_ctx_destroy(&ctx);
    COVER_map_destroy(&activeDmers);
    return dictionarySize;
  }
}
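
/* Caller-side sketch showing how this function might be invoked; buffer names
 * and parameter values are illustrative placeholders:
 *
 *   ZDICT_cover_params_t params;
 *   memset(&params, 0, sizeof(params));
 *   params.k = 200;   // segment size
 *   params.d = 8;     // dmer size, must satisfy d <= k
 *   {   size_t const dictSize = ZDICT_trainFromBuffer_cover(
 *               dictBuffer, dictBufferCapacity,
 *               samplesBuffer, samplesSizes, nbSamples, params);
 *       if (ZDICT_isError(dictSize)) {
 *           // handle the error, e.g. report ZDICT_getErrorName(dictSize)
 *       }
 *   }
 */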



size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
                                    const size_t *samplesSizes, const BYTE *samples,
                                    size_t *offsets,
                                    size_t nbTrainSamples, size_t nbSamples,
                                    BYTE *const dict, size_t dictBufferCapacity) {
  size_t totalCompressedSize = ERROR(GENERIC);
  /* Pointers */
  ZSTD_CCtx *cctx;
  ZSTD_CDict *cdict;
  void *dst;
  /* Local variables */
  size_t dstCapacity;
  size_t i;
  /* Allocate dst with enough space to compress the maximum sized sample */
  {
    size_t maxSampleSize = 0;
    i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
    for (; i < nbSamples; ++i) {
      maxSampleSize = MAX(samplesSizes[i], maxSampleSize);
    }
    dstCapacity = ZSTD_compressBound(maxSampleSize);
    dst = malloc(dstCapacity);
  }
  /* Create the cctx and cdict */
  cctx = ZSTD_createCCtx();
  cdict = ZSTD_createCDict(dict, dictBufferCapacity,
                           parameters.zParams.compressionLevel);
  if (!dst || !cctx || !cdict) {
    goto _compressCleanup;
  }
  /* Compress each sample and sum their sizes (or error) */
  totalCompressedSize = dictBufferCapacity;
  i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
  for (; i < nbSamples; ++i) {
    const size_t size = ZSTD_compress_usingCDict(
        cctx, dst, dstCapacity, samples + offsets[i],
        samplesSizes[i], cdict);
    if (ZSTD_isError(size)) {
      totalCompressedSize = size;
      goto _compressCleanup;
    }
    totalCompressedSize += size;
  }
_compressCleanup:
  ZSTD_freeCCtx(cctx);
  ZSTD_freeCDict(cdict);
  if (dst) {
    free(dst);
  }
  return totalCompressedSize;
}


/**
 * Initialize the `COVER_best_t`.
 */
void COVER_best_init(COVER_best_t *best) {
  if (best==NULL) return; /* compatible with init on NULL */
  (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
  (void)ZSTD_pthread_cond_init(&best->cond, NULL);
  best->liveJobs = 0;
  best->dict = NULL;
  best->dictSize = 0;
  best->compressedSize = (size_t)-1;
  memset(&best->parameters, 0, sizeof(best->parameters));
}
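
/* Typical lifecycle, as used by ZDICT_optimizeTrainFromBuffer_cover() below:
 * COVER_best_init() once, COVER_best_start() before each parameter trial,
 * COVER_best_finish() when a trial completes (possibly on a worker thread),
 * COVER_best_wait() to block until all trials are done, and finally
 * COVER_best_destroy().
 */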

/**
 * Wait until liveJobs == 0.
 */
void COVER_best_wait(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  while (best->liveJobs != 0) {
    ZSTD_pthread_cond_wait(&best->cond, &best->mutex);
  }
  ZSTD_pthread_mutex_unlock(&best->mutex);
}

/**
 * Call COVER_best_wait() and then destroy the COVER_best_t.
 */
void COVER_best_destroy(COVER_best_t *best) {
  if (!best) {
    return;
  }
  COVER_best_wait(best);
  if (best->dict) {
    free(best->dict);
  }
  ZSTD_pthread_mutex_destroy(&best->mutex);
  ZSTD_pthread_cond_destroy(&best->cond);
}

/**
 * Called when a thread is about to be launched.
 * Increments liveJobs.
 */
void COVER_best_start(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  ++best->liveJobs;
  ZSTD_pthread_mutex_unlock(&best->mutex);
}

/**
 * Called when a thread finishes executing, on both error and success.
 * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
 * If this dictionary is the best so far save it and its parameters.
 */
void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters,
                              COVER_dictSelection_t selection) {
  void* dict = selection.dictContent;
  size_t compressedSize = selection.totalCompressedSize;
  size_t dictSize = selection.dictSize;
  if (!best) {
    return;
  }
  {
    size_t liveJobs;
    ZSTD_pthread_mutex_lock(&best->mutex);
    --best->liveJobs;
    liveJobs = best->liveJobs;
    /* If the new dictionary is better */
    if (compressedSize < best->compressedSize) {
      /* Allocate space if necessary */
      if (!best->dict || best->dictSize < dictSize) {
        if (best->dict) {
          free(best->dict);
        }
        best->dict = malloc(dictSize);
        if (!best->dict) {
          best->compressedSize = ERROR(GENERIC);
          best->dictSize = 0;
          ZSTD_pthread_cond_signal(&best->cond);
          ZSTD_pthread_mutex_unlock(&best->mutex);
          return;
        }
      }
      /* Save the dictionary, parameters, and size */
      if (dict) {
        memcpy(best->dict, dict, dictSize);
        best->dictSize = dictSize;
        best->parameters = parameters;
        best->compressedSize = compressedSize;
      }
    }
    if (liveJobs == 0) {
      ZSTD_pthread_cond_broadcast(&best->cond);
    }
    ZSTD_pthread_mutex_unlock(&best->mutex);
  }
}

COVER_dictSelection_t COVER_dictSelectionError(size_t error) {
    COVER_dictSelection_t selection = { NULL, 0, error };
    return selection;
}

unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) {
  return (ZSTD_isError(selection.totalCompressedSize) || !selection.dictContent);
}

void COVER_dictSelectionFree(COVER_dictSelection_t selection) {
  free(selection.dictContent);
}

COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBufferCapacity,
        size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples,
        size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize) {

  size_t largestDict = 0;
  size_t largestCompressed = 0;
  BYTE* customDictContentEnd = customDictContent + dictContentSize;

  BYTE * largestDictbuffer = (BYTE *)malloc(dictBufferCapacity);
  BYTE * candidateDictBuffer = (BYTE *)malloc(dictBufferCapacity);
  double regressionTolerance = ((double)params.shrinkDictMaxRegression / 100.0) + 1.00;

  if (!largestDictbuffer || !candidateDictBuffer) {
    free(largestDictbuffer);
    free(candidateDictBuffer);
    return COVER_dictSelectionError(dictContentSize);
  }

  /* Initial dictionary size and compressed size */
  memcpy(largestDictbuffer, customDictContent, dictContentSize);
  dictContentSize = ZDICT_finalizeDictionary(
    largestDictbuffer, dictBufferCapacity, customDictContent, dictContentSize,
    samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);

  if (ZDICT_isError(dictContentSize)) {
    free(largestDictbuffer);
    free(candidateDictBuffer);
    return COVER_dictSelectionError(dictContentSize);
  }

  totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
                                                       samplesBuffer, offsets,
                                                       nbCheckSamples, nbSamples,
                                                       largestDictbuffer, dictContentSize);

  if (ZSTD_isError(totalCompressedSize)) {
    free(largestDictbuffer);
    free(candidateDictBuffer);
    return COVER_dictSelectionError(totalCompressedSize);
  }

  if (params.shrinkDict == 0) {
    COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize };
    free(candidateDictBuffer);
    return selection;
  }

  largestDict = dictContentSize;
  largestCompressed = totalCompressedSize;
  dictContentSize = ZDICT_DICTSIZE_MIN;

  /* Largest dict is initially at least ZDICT_DICTSIZE_MIN */
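  /* Shrinking search: starting from ZDICT_DICTSIZE_MIN and roughly doubling
   * each iteration, accept the first (smallest) candidate whose total
   * compressed size stays within the allowed regression of the full-size
   * dictionary.  For example, shrinkDictMaxRegression = 5 gives
   * regressionTolerance = 1.05, so a candidate is kept if its compressed size
   * is at most 5% worse than largestCompressed.
   */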
  while (dictContentSize < largestDict) {
    memcpy(candidateDictBuffer, largestDictbuffer, largestDict);
    dictContentSize = ZDICT_finalizeDictionary(
      candidateDictBuffer, dictBufferCapacity, customDictContentEnd - dictContentSize, dictContentSize,
      samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);

    if (ZDICT_isError(dictContentSize)) {
      free(largestDictbuffer);
      free(candidateDictBuffer);
      return COVER_dictSelectionError(dictContentSize);
    }

    totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
                                                         samplesBuffer, offsets,
                                                         nbCheckSamples, nbSamples,
                                                         candidateDictBuffer, dictContentSize);

    if (ZSTD_isError(totalCompressedSize)) {
      free(largestDictbuffer);
      free(candidateDictBuffer);
      return COVER_dictSelectionError(totalCompressedSize);
    }

    if (totalCompressedSize <= largestCompressed * regressionTolerance) {
      COVER_dictSelection_t selection = { candidateDictBuffer, dictContentSize, totalCompressedSize };
      free(largestDictbuffer);
      return selection;
    }
    dictContentSize *= 2;
  }
  dictContentSize = largestDict;
  totalCompressedSize = largestCompressed;
  {
    COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize };
    free(candidateDictBuffer);
    return selection;
  }
}

/**
 * Parameters for COVER_tryParameters().
 */
typedef struct COVER_tryParameters_data_s {
  const COVER_ctx_t *ctx;
  COVER_best_t *best;
  size_t dictBufferCapacity;
  ZDICT_cover_params_t parameters;
} COVER_tryParameters_data_t;

/**
 * Tries a set of parameters and updates the COVER_best_t with the results.
 * This function is thread safe if zstd is compiled with multithreaded support.
 * It takes its parameters as an *OWNING* opaque pointer to support threading.
 */
static void COVER_tryParameters(void *opaque)
{
  /* Save parameters as local variables */
  COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t*)opaque;
  const COVER_ctx_t *const ctx = data->ctx;
  const ZDICT_cover_params_t parameters = data->parameters;
  size_t dictBufferCapacity = data->dictBufferCapacity;
  size_t totalCompressedSize = ERROR(GENERIC);
  /* Allocate space for hash table, dict, and freqs */
  COVER_map_t activeDmers;
  BYTE* const dict = (BYTE*)malloc(dictBufferCapacity);
  COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));
  U32* const freqs = (U32*)malloc(ctx->suffixSize * sizeof(U32));
  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
    goto _cleanup;
  }
  if (!dict || !freqs) {
    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
    goto _cleanup;
  }
  /* Copy the frequencies because we need to modify them */
  memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
  /* Build the dictionary */
  {
    const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
                                              dictBufferCapacity, parameters);
    selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail,
        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,
        totalCompressedSize);

    if (COVER_dictSelectionIsError(selection)) {
      DISPLAYLEVEL(1, "Failed to select dictionary\n");
      goto _cleanup;
    }
  }
_cleanup:
  free(dict);
  COVER_best_finish(data->best, parameters, selection);
  free(data);
  COVER_map_destroy(&activeDmers);
  COVER_dictSelectionFree(selection);
  free(freqs);
}

ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
    void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer,
    const size_t* samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t* parameters)
{
  /* constants */
  const unsigned nbThreads = parameters->nbThreads;
  const double splitPoint =
      parameters->splitPoint <= 0.0 ? COVER_DEFAULT_SPLITPOINT : parameters->splitPoint;
  const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
  const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
  const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
  const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
  const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
  const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
  const unsigned kIterations =
      (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
  const unsigned shrinkDict = 0;
  /* Local variables */
  const int displayLevel = parameters->zParams.notificationLevel;
  unsigned iteration = 1;
  unsigned d;
  unsigned k;
  COVER_best_t best;
  POOL_ctx *pool = NULL;
  int warned = 0;

  /* Checks */
  if (splitPoint <= 0 || splitPoint > 1) {
    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
    return ERROR(parameter_outOfBound);
  }
  if (kMinK < kMaxD || kMaxK < kMinK) {
    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
    return ERROR(parameter_outOfBound);
  }
  if (nbSamples == 0) {
    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
    return ERROR(srcSize_wrong);
  }
  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                 ZDICT_DICTSIZE_MIN);
    return ERROR(dstSize_tooSmall);
  }
  if (nbThreads > 1) {
    pool = POOL_create(nbThreads, 1);
    if (!pool) {
      return ERROR(memory_allocation);
    }
  }
  /* Initialization */
  COVER_best_init(&best);
  /* Turn down global display level to clean up display at level 2 and below */
  g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
  /* Loop through d first because each new value needs a new context */
  LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
                    kIterations);
  for (d = kMinD; d <= kMaxD; d += 2) {
    /* Initialize the context for this value of d */
    COVER_ctx_t ctx;
    LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
    {
      const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint);
      if (ZSTD_isError(initVal)) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
        COVER_best_destroy(&best);
        POOL_free(pool);
        return initVal;
      }
    }
    if (!warned) {
      COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel);
      warned = 1;
    }
    /* Loop through k reusing the same context */
    for (k = kMinK; k <= kMaxK; k += kStepSize) {
      /* Prepare the arguments */
      COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(
          sizeof(COVER_tryParameters_data_t));
      LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
      if (!data) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
        COVER_best_destroy(&best);
        COVER_ctx_destroy(&ctx);
        POOL_free(pool);
        return ERROR(memory_allocation);
      }
      data->ctx = &ctx;
      data->best = &best;
      data->dictBufferCapacity = dictBufferCapacity;
      data->parameters = *parameters;
      data->parameters.k = k;
      data->parameters.d = d;
      data->parameters.splitPoint = splitPoint;
      data->parameters.steps = kSteps;
      data->parameters.shrinkDict = shrinkDict;
      data->parameters.zParams.notificationLevel = g_displayLevel;
      /* Check the parameters */
      if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
        DISPLAYLEVEL(1, "Cover parameters incorrect\n");
        free(data);
        continue;
      }
      /* Call the function and pass ownership of data to it */
      COVER_best_start(&best);
      if (pool) {
        POOL_add(pool, &COVER_tryParameters, data);
      } else {
        COVER_tryParameters(data);
      }
      /* Print status */
      LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%%       ",
                         (unsigned)((iteration * 100) / kIterations));
      ++iteration;
    }
    COVER_best_wait(&best);
    COVER_ctx_destroy(&ctx);
  }
  LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
  /* Fill the output buffer and parameters with output of the best parameters */
  {
    const size_t dictSize = best.dictSize;
    if (ZSTD_isError(best.compressedSize)) {
      const size_t compressedSize = best.compressedSize;
      COVER_best_destroy(&best);
      POOL_free(pool);
      return compressedSize;
    }
    *parameters = best.parameters;
    memcpy(dictBuffer, best.dict, dictSize);
    COVER_best_destroy(&best);
    POOL_free(pool);
    return dictSize;
  }
}
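
/* Caller-side sketch showing how the optimizing trainer might be invoked;
 * buffer names and values are illustrative placeholders.  Leaving k and d at 0
 * lets the trainer search for them, and the chosen values are written back
 * into `params`:
 *
 *   ZDICT_cover_params_t params;
 *   memset(&params, 0, sizeof(params));
 *   params.steps = 40;        // number of k values to try per d
 *   params.nbThreads = 4;     // uses worker threads if zstd is built with multithreading
 *   {   size_t const dictSize = ZDICT_optimizeTrainFromBuffer_cover(
 *               dictBuffer, dictBufferCapacity,
 *               samplesBuffer, samplesSizes, nbSamples, &params);
 *       if (!ZDICT_isError(dictSize)) {
 *           // params.k and params.d now hold the best parameters found
 *       }
 *   }
 */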