/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_ldm.h"

#include "../common/debug.h"
#include <linux/xxhash.h>
#include "zstd_fast.h"          /* ZSTD_fillHashTable() */
#include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */
#include "zstd_ldm_geartab.h"

#define LDM_BUCKET_SIZE_LOG 3
#define LDM_MIN_MATCH_LENGTH 64
#define LDM_HASH_RLOG 7

typedef struct {
    U64 rolling;
    U64 stopMask;
} ldmRollingHashState_t;

/* ZSTD_ldm_gear_init():
 *
 * Initializes the rolling hash state such that it will honor the
 * settings in params. */
static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
{
    unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
    unsigned hashRateLog = params->hashRateLog;

    state->rolling = ~(U32)0;

    /* The choice of the splitting criterion is subject to two conditions:
     *   1. it has to trigger on average every 2^(hashRateLog) bytes;
     *   2. ideally, it has to depend on a window of minMatchLength bytes.
     *
     * In the gear hash algorithm, bit n depends on the last n bytes;
     * so in order to obtain a good quality splitting criterion it is
     * preferable to use bits with high weight.
     *
     * To match condition 1 we use a mask with hashRateLog bits set
     * and, because of the previous remark, we make sure these bits
     * have the highest possible weight while still respecting
     * condition 2.
     */
    if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
        state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
    } else {
        /* In this degenerate case we simply honor the hash rate. */
        state->stopMask = ((U64)1 << hashRateLog) - 1;
    }
}
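
/* Worked example for ZSTD_ldm_gear_init() (illustration only): with
 * minMatchLength == 64 and hashRateLog == 7 (the defaults above),
 * maxBitsInMask == 64, so
 *     stopMask = ((U64)0x7F) << (64 - 7) = 0xFE00000000000000ULL,
 * i.e. the 7 highest-weight bits of the rolling hash. A split point triggers
 * whenever those 7 bits are all zero, which happens on average once every
 * 2^7 == 128 input bytes. */
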
59
60/* ZSTD_ldm_gear_reset()
61 * Feeds [data, data + minMatchLength) into the hash without registering any
62 * splits. This effectively resets the hash state. This is used when skipping
63 * over data, either at the beginning of a block, or skipping sections.
64 */
65static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state,
66                                BYTE const* data, size_t minMatchLength)
67{
68    U64 hash = state->rolling;
69    size_t n = 0;
70
71#define GEAR_ITER_ONCE() do {                                  \
72        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
73        n += 1;                                                \
74    } while (0)
75    while (n + 3 < minMatchLength) {
76        GEAR_ITER_ONCE();
77        GEAR_ITER_ONCE();
78        GEAR_ITER_ONCE();
79        GEAR_ITER_ONCE();
80    }
81    while (n < minMatchLength) {
82        GEAR_ITER_ONCE();
83    }
84#undef GEAR_ITER_ONCE
85}
86
87/* ZSTD_ldm_gear_feed():
88 *
89 * Registers in the splits array all the split points found in the first
90 * size bytes following the data pointer. This function terminates when
91 * either all the data has been processed or LDM_BATCH_SIZE splits are
92 * present in the splits array.
93 *
94 * Precondition: The splits array must not be full.
95 * Returns: The number of bytes processed. */
96static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
97                                 BYTE const* data, size_t size,
98                                 size_t* splits, unsigned* numSplits)
99{
100    size_t n;
101    U64 hash, mask;
102
103    hash = state->rolling;
104    mask = state->stopMask;
105    n = 0;
106
107#define GEAR_ITER_ONCE() do { \
108        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
109        n += 1; \
110        if (UNLIKELY((hash & mask) == 0)) { \
111            splits[*numSplits] = n; \
112            *numSplits += 1; \
113            if (*numSplits == LDM_BATCH_SIZE) \
114                goto done; \
115        } \
116    } while (0)
117
118    while (n + 3 < size) {
119        GEAR_ITER_ONCE();
120        GEAR_ITER_ONCE();
121        GEAR_ITER_ONCE();
122        GEAR_ITER_ONCE();
123    }
124    while (n < size) {
125        GEAR_ITER_ONCE();
126    }
127
128#undef GEAR_ITER_ONCE
129
130done:
131    state->rolling = hash;
132    return n;
133}
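
/* Typical driver loop for ZSTD_ldm_gear_feed() (a sketch of how the callers
 * below use it, not a public API):
 *
 *     while (ip < iend) {
 *         unsigned numSplits = 0;
 *         size_t const hashed =
 *             ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);
 *         ... consume splits[0 .. numSplits) ...
 *         ip += hashed;
 *     }
 *
 * Resetting numSplits before each call maintains the precondition that the
 * splits array is not full. */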

void ZSTD_ldm_adjustParameters(ldmParams_t* params,
                               ZSTD_compressionParameters const* cParams)
{
    params->windowLog = cParams->windowLog;
    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
    if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
    if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
    if (params->hashLog == 0) {
        params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
        assert(params->hashLog <= ZSTD_HASHLOG_MAX);
    }
    if (params->hashRateLog == 0) {
        params->hashRateLog = params->windowLog < params->hashLog
                                   ? 0
                                   : params->windowLog - params->hashLog;
    }
    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
}
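
/* Worked example for ZSTD_ldm_adjustParameters() (illustration only): with
 * windowLog == 27 and all LDM parameters left at 0, the defaults resolve to
 * bucketSizeLog == 3, minMatchLength == 64,
 * hashLog == MAX(ZSTD_HASHLOG_MIN, 27 - LDM_HASH_RLOG) == 20, and
 * hashRateLog == windowLog - hashLog == 7, i.e. roughly one hash table
 * insertion every 128 bytes on average. */
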
154
155size_t ZSTD_ldm_getTableSize(ldmParams_t params)
156{
157    size_t const ldmHSize = ((size_t)1) << params.hashLog;
158    size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
159    size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
160    size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
161                           + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
162    return params.enableLdm == ZSTD_ps_enable ? totalSize : 0;
163}
164
165size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
166{
167    return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0;
168}
169
170/* ZSTD_ldm_getBucket() :
171 *  Returns a pointer to the start of the bucket associated with hash. */
172static ldmEntry_t* ZSTD_ldm_getBucket(
173        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
174{
175    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
176}
177
178/* ZSTD_ldm_insertEntry() :
179 *  Insert the entry with corresponding hash into the hash table */
180static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
181                                 size_t const hash, const ldmEntry_t entry,
182                                 ldmParams_t const ldmParams)
183{
184    BYTE* const pOffset = ldmState->bucketOffsets + hash;
185    unsigned const offset = *pOffset;
186
187    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
188    *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
189
190}
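
/* Layout note (derived from the two helpers above): the hash table stores
 * (1 << hashLog) entries grouped into (1 << (hashLog - bucketSizeLog)) buckets
 * of (1 << bucketSizeLog) entries each. bucketOffsets[hash] tracks the next
 * slot to fill in bucket `hash`, so insertions replace the oldest entry of a
 * bucket in round-robin fashion. */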

/* ZSTD_ldm_countBackwardsMatch() :
 *  Returns the number of bytes that match backwards before pIn and pMatch.
 *
 *  We count only bytes where pMatch >= pMatchBase and pIn >= pAnchor. */
static size_t ZSTD_ldm_countBackwardsMatch(
            const BYTE* pIn, const BYTE* pAnchor,
            const BYTE* pMatch, const BYTE* pMatchBase)
{
    size_t matchLength = 0;
    while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
        pIn--;
        pMatch--;
        matchLength++;
    }
    return matchLength;
}

/* ZSTD_ldm_countBackwardsMatch_2segments() :
 *  Returns the number of bytes that match backwards from pMatch,
 *  even with the backwards match spanning 2 different segments.
 *
 *  On reaching `pMatchBase`, start counting from `pExtDictEnd` */
static size_t ZSTD_ldm_countBackwardsMatch_2segments(
                    const BYTE* pIn, const BYTE* pAnchor,
                    const BYTE* pMatch, const BYTE* pMatchBase,
                    const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
{
    size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
    if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
        /* If backwards match is entirely in the extDict or prefix, immediately return */
        return matchLength;
    }
    DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
    matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
    DEBUGLOG(7, "final backwards match length = %zu", matchLength);
    return matchLength;
}
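
/* Illustration of the 2-segment case (extDict layout as used below):
 *
 *     extDict: [pExtDictStart ... pExtDictEnd)    prefix: [pMatchBase ...)
 *
 * When the backwards scan stops exactly at pMatchBase (the start of the
 * prefix segment), matching continues against the bytes just below
 * pExtDictEnd, since those two segments hold consecutive bytes of the
 * original input. */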

/* ZSTD_ldm_fillFastTables() :
 *
 *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
 *  This is similar to ZSTD_loadDictionaryContent.
 *
 *  The tables for the other strategies are filled within their
 *  block compressors. */
static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
                                      void const* end)
{
    const BYTE* const iend = (const BYTE*)end;

    switch(ms->cParams.strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
        break;

    case ZSTD_dfast:
        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
        break;

    case ZSTD_greedy:
    case ZSTD_lazy:
    case ZSTD_lazy2:
    case ZSTD_btlazy2:
    case ZSTD_btopt:
    case ZSTD_btultra:
    case ZSTD_btultra2:
        break;
    default:
        assert(0);  /* not possible : not a valid strategy id */
    }

    return 0;
}

void ZSTD_ldm_fillHashTable(
            ldmState_t* ldmState, const BYTE* ip,
            const BYTE* iend, ldmParams_t const* params)
{
    U32 const minMatchLength = params->minMatchLength;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const istart = ip;
    ldmRollingHashState_t hashState;
    size_t* const splits = ldmState->splitIndices;
    unsigned numSplits;

    DEBUGLOG(5, "ZSTD_ldm_fillHashTable");

    ZSTD_ldm_gear_init(&hashState, params);
    while (ip < iend) {
        size_t hashed;
        unsigned n;

        numSplits = 0;
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);

        for (n = 0; n < numSplits; n++) {
            if (ip + splits[n] >= istart + minMatchLength) {
                BYTE const* const split = ip + splits[n] - minMatchLength;
                U64 const xxhash = xxh64(split, minMatchLength, 0);
                U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
                ldmEntry_t entry;

                entry.offset = (U32)(split - base);
                entry.checksum = (U32)(xxhash >> 32);
                ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
            }
        }

        ip += hashed;
    }
}


/* ZSTD_ldm_limitTableUpdate() :
 *
 *  Sets ms->nextToUpdate to a position closer to anchor if it has fallen
 *  far behind (after a long match, only update tables a limited amount). */
static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
{
    U32 const curr = (U32)(anchor - ms->window.base);
    if (curr > ms->nextToUpdate + 1024) {
        ms->nextToUpdate =
            curr - MIN(512, curr - ms->nextToUpdate - 1024);
    }
}
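
/* Worked example (illustration only): if anchor sits at index 20000 and
 * ms->nextToUpdate == 10000, the gap of 10000 exceeds 1024, so nextToUpdate
 * moves up to 20000 - MIN(512, 8976) == 19488. After a long match, at most
 * 512 bytes of pending table updates are preserved. */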
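/* ZSTD_ldm_generateSequences_internal() :
 *  Generates LDM sequences for [src, src + srcSize) into rawSeqStore,
 *  inserting hash table entries along the way. Returns the number of
 *  trailing bytes left uncovered by any generated sequence (iend - anchor),
 *  or an error code if the sequence store runs out of capacity. */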
static size_t ZSTD_ldm_generateSequences_internal(
        ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
        ldmParams_t const* params, void const* src, size_t srcSize)
{
    /* LDM parameters */
    int const extDict = ZSTD_window_hasExtDict(ldmState->window);
    U32 const minMatchLength = params->minMatchLength;
    U32 const entsPerBucket = 1U << params->bucketSizeLog;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    /* Prefix and extDict parameters */
    U32 const dictLimit = ldmState->window.dictLimit;
    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
    BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
    BYTE const* const lowPrefixPtr = base + dictLimit;
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    BYTE const* const ilimit = iend - HASH_READ_SIZE;
    /* Input positions */
    BYTE const* anchor = istart;
    BYTE const* ip = istart;
    /* Rolling hash state */
    ldmRollingHashState_t hashState;
    /* Arrays for staged-processing */
    size_t* const splits = ldmState->splitIndices;
    ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
    unsigned numSplits;

    if (srcSize < minMatchLength)
        return iend - anchor;

    /* Initialize the rolling hash state with the first minMatchLength bytes */
    ZSTD_ldm_gear_init(&hashState, params);
    ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength);
    ip += minMatchLength;

    while (ip < ilimit) {
        size_t hashed;
        unsigned n;

        numSplits = 0;
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
                                    splits, &numSplits);

        for (n = 0; n < numSplits; n++) {
            BYTE const* const split = ip + splits[n] - minMatchLength;
            U64 const xxhash = xxh64(split, minMatchLength, 0);
            U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));

            candidates[n].split = split;
            candidates[n].hash = hash;
            candidates[n].checksum = (U32)(xxhash >> 32);
            candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
            PREFETCH_L1(candidates[n].bucket);
        }

        for (n = 0; n < numSplits; n++) {
            size_t forwardMatchLength = 0, backwardMatchLength = 0,
                   bestMatchLength = 0, mLength;
            U32 offset;
            BYTE const* const split = candidates[n].split;
            U32 const checksum = candidates[n].checksum;
            U32 const hash = candidates[n].hash;
            ldmEntry_t* const bucket = candidates[n].bucket;
            ldmEntry_t const* cur;
            ldmEntry_t const* bestEntry = NULL;
            ldmEntry_t newEntry;

            newEntry.offset = (U32)(split - base);
            newEntry.checksum = checksum;

            /* If a split point would generate a sequence overlapping with
             * the previous one, we merely register it in the hash table and
             * move on */
            if (split < anchor) {
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
                continue;
            }

            for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
                size_t curForwardMatchLength, curBackwardMatchLength,
                       curTotalMatchLength;
                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
                    continue;
                }
                if (extDict) {
                    BYTE const* const curMatchBase =
                        cur->offset < dictLimit ? dictBase : base;
                    BYTE const* const pMatch = curMatchBase + cur->offset;
                    BYTE const* const matchEnd =
                        cur->offset < dictLimit ? dictEnd : iend;
                    BYTE const* const lowMatchPtr =
                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;
                    curForwardMatchLength =
                        ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
                            split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
                } else { /* !extDict */
                    BYTE const* const pMatch = base + cur->offset;
                    curForwardMatchLength = ZSTD_count(split, pMatch, iend);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength =
                        ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
                }
                curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;

                if (curTotalMatchLength > bestMatchLength) {
                    bestMatchLength = curTotalMatchLength;
                    forwardMatchLength = curForwardMatchLength;
                    backwardMatchLength = curBackwardMatchLength;
                    bestEntry = cur;
                }
            }

            /* No match found -- insert an entry into the hash table
             * and process the next candidate match */
            if (bestEntry == NULL) {
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
                continue;
            }

            /* Match found */
            offset = (U32)(split - base) - bestEntry->offset;
            mLength = forwardMatchLength + backwardMatchLength;
            {
                rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;

                /* Out of sequence storage */
                if (rawSeqStore->size == rawSeqStore->capacity)
                    return ERROR(dstSize_tooSmall);
                seq->litLength = (U32)(split - backwardMatchLength - anchor);
                seq->matchLength = (U32)mLength;
                seq->offset = offset;
                rawSeqStore->size++;
            }

            /* Insert the current entry into the hash table --- it must be
             * done after the previous block to avoid clobbering bestEntry */
            ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);

            anchor = split + forwardMatchLength;

            /* If we find a match that ends after the data that we've hashed,
             * then we have a repeating, overlapping pattern, e.g. all zeros.
             * If one repetition of the pattern matches our `stopMask` then all
             * repetitions will. We don't need to insert them all into our table,
             * only the first one. So skip over overlapping matches.
             * This is a major speed boost (20x) for compressing a single byte
             * repeated, when that byte ends up in the table.
             */
            if (anchor > ip + hashed) {
                ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength);
                /* Continue the outer loop at anchor (ip + hashed == anchor). */
                ip = anchor - hashed;
                break;
            }
        }

        ip += hashed;
    }

    return iend - anchor;
}

/*! ZSTD_ldm_reduceTable() :
 *  reduce table indexes by `reducerValue` */
static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
                                 U32 const reducerValue)
{
    U32 u;
    for (u = 0; u < size; u++) {
        if (table[u].offset < reducerValue) table[u].offset = 0;
        else table[u].offset -= reducerValue;
    }
}
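
/* Note: offsets below reducerValue are clamped to 0 rather than wrapped.
 * A zeroed offset behaves as an empty slot, because lookups in
 * ZSTD_ldm_generateSequences_internal() skip entries with
 * cur->offset <= lowestIndex. */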

size_t ZSTD_ldm_generateSequences(
        ldmState_t* ldmState, rawSeqStore_t* sequences,
        ldmParams_t const* params, void const* src, size_t srcSize)
{
    U32 const maxDist = 1U << params->windowLog;
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    size_t const kMaxChunkSize = 1 << 20;
    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
    size_t chunk;
    size_t leftoverSize = 0;

    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
    /* Check that ZSTD_window_update() has been called for this chunk prior
     * to passing it to this function.
     */
    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
    /* The input could be very large (in zstdmt), so it must be broken up into
     * chunks to enforce the maximum distance and handle overflow correction.
     */
    assert(sequences->pos <= sequences->size);
    assert(sequences->size <= sequences->capacity);
    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
        size_t const remaining = (size_t)(iend - chunkStart);
        BYTE const* const chunkEnd =
            (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
        size_t const chunkSize = chunkEnd - chunkStart;
        size_t newLeftoverSize;
        size_t const prevSize = sequences->size;

        assert(chunkStart < iend);
        /* 1. Perform overflow correction if necessary. */
        if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd)) {
            U32 const ldmHSize = 1U << params->hashLog;
            U32 const correction = ZSTD_window_correctOverflow(
                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
            /* invalidate dictionaries on overflow correction */
            ldmState->loadedDictEnd = 0;
        }
        /* 2. We enforce the maximum offset allowed.
         *
         * kMaxChunkSize should be small enough that we don't lose too much of
         * the window through early invalidation.
         * TODO: * Test the chunk size.
         *       * Try invalidation after the sequence generation and test the
         *         offset against maxDist directly.
         *
         * NOTE: Because of dictionaries + sequence splitting we MUST make sure
         * that any offset used is valid at the END of the sequence, since it may
         * be split into two sequences. This condition holds when using
         * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
         * against maxDist directly, we'll have to carefully handle that case.
         */
        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
        newLeftoverSize = ZSTD_ldm_generateSequences_internal(
            ldmState, sequences, params, chunkStart, chunkSize);
        if (ZSTD_isError(newLeftoverSize))
            return newLeftoverSize;
        /* 4. We add the leftover literals from previous iterations to the first
         *    newly generated sequence, or add the `newLeftoverSize` if none are
         *    generated.
         */
        /* Prepend the leftover literals from the last call */
        if (prevSize < sequences->size) {
            sequences->seq[prevSize].litLength += (U32)leftoverSize;
            leftoverSize = newLeftoverSize;
        } else {
            assert(newLeftoverSize == chunkSize);
            leftoverSize += chunkSize;
        }
    }
    return 0;
}

void
ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch)
{
    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
        if (srcSize <= seq->litLength) {
            /* Skip past srcSize literals */
            seq->litLength -= (U32)srcSize;
            return;
        }
        srcSize -= seq->litLength;
        seq->litLength = 0;
        if (srcSize < seq->matchLength) {
            /* Skip past the first srcSize of the match */
            seq->matchLength -= (U32)srcSize;
            if (seq->matchLength < minMatch) {
                /* The match is too short, omit it */
                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
                    seq[1].litLength += seq[0].matchLength;
                }
                rawSeqStore->pos++;
            }
            return;
        }
        srcSize -= seq->matchLength;
        seq->matchLength = 0;
        rawSeqStore->pos++;
    }
}

/*
 * If the sequence length is longer than remaining then the sequence is split
 * between this block and the next.
 *
 * Returns the current sequence to handle, or if the rest of the block should
 * be literals, it returns a sequence with offset == 0.
 */
static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
                                 U32 const remaining, U32 const minMatch)
{
    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
    assert(sequence.offset > 0);
    /* Likely: No partial sequence */
    if (remaining >= sequence.litLength + sequence.matchLength) {
        rawSeqStore->pos++;
        return sequence;
    }
    /* Cut the sequence short (offset == 0 ==> rest is literals). */
    if (remaining <= sequence.litLength) {
        sequence.offset = 0;
    } else if (remaining < sequence.litLength + sequence.matchLength) {
        sequence.matchLength = remaining - sequence.litLength;
        if (sequence.matchLength < minMatch) {
            sequence.offset = 0;
        }
    }
    /* Skip past `remaining` bytes for the future sequences. */
    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
    return sequence;
}
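
/* Worked example for maybeSplitSequence() (illustration only): with
 * remaining == 100 and a stored sequence {litLength == 60, matchLength == 80},
 * the match is cut down to matchLength == 40 so the sequence fits the block;
 * had the trimmed match fallen below minMatch, offset would be set to 0 and
 * the rest of the block emitted as literals. The 100 skipped bytes are then
 * accounted for in the raw sequence store via ZSTD_ldm_skipSequences(). */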

void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
        if (currPos >= currSeq.litLength + currSeq.matchLength) {
            currPos -= currSeq.litLength + currSeq.matchLength;
            rawSeqStore->pos++;
        } else {
            rawSeqStore->posInSequence = currPos;
            break;
        }
    }
    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
        rawSeqStore->posInSequence = 0;
    }
}
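
/* Worked example for ZSTD_ldm_skipRawSeqStoreBytes() (illustration only):
 * skipping 15 bytes when the current sequence has litLength == 10 and
 * matchLength == 20 leaves pos unchanged and sets posInSequence == 15, i.e.
 * the next consumer resumes 5 bytes into the match. */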

size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
    ZSTD_paramSwitch_e useRowMatchFinder,
    void const* src, size_t srcSize)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    unsigned const minMatch = cParams->minMatch;
    ZSTD_blockCompressor const blockCompressor =
        ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    /* Input positions */
    BYTE const* ip = istart;

    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
    /* If using opt parser, use LDMs only as candidates rather than always accepting them */
    if (cParams->strategy >= ZSTD_btopt) {
        size_t lastLLSize;
        ms->ldmSeqStore = rawSeqStore;
        lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
        ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
        return lastLLSize;
    }

    assert(rawSeqStore->pos <= rawSeqStore->size);
    assert(rawSeqStore->size <= rawSeqStore->capacity);
    /* Loop through each sequence and apply the block compressor to the literals */
    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
        /* maybeSplitSequence updates rawSeqStore->pos */
        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
                                                   (U32)(iend - ip), minMatch);
        int i;
        /* End signal */
        if (sequence.offset == 0)
            break;

        assert(ip + sequence.litLength + sequence.matchLength <= iend);

        /* Fill tables for block compressor */
        ZSTD_ldm_limitTableUpdate(ms, ip);
        ZSTD_ldm_fillFastTables(ms, ip);
        /* Run the block compressor */
        DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
        {
            size_t const newLitLength =
                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
            ip += sequence.litLength;
            /* Update the repcodes */
            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
                rep[i] = rep[i-1];
            rep[0] = sequence.offset;
            /* Store the sequence */
            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
                          STORE_OFFSET(sequence.offset),
                          sequence.matchLength);
            ip += sequence.matchLength;
        }
    }
    /* Fill the tables for the block compressor */
    ZSTD_ldm_limitTableUpdate(ms, ip);
    ZSTD_ldm_fillFastTables(ms, ip);
    /* Compress the last literals */
    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
}