linux/lib/zstd/compress/zstd_opt.c
/*
 * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*
 * Disable inlining for the optimal parser for the kernel build.
 * It is unlikely to be used in the kernel, and where it is used
 * latency shouldn't matter because it is very slow to begin with.
 * We prefer a ~180KB binary size win over faster optimal parsing.
 *
 * TODO(https://github.com/facebook/zstd/issues/2862):
 * Improve the code size of the optimal parser in general, so we
 * don't need this hack for the kernel build.
 */
#define ZSTD_NO_INLINE 1

#include "zstd_compress_internal.h"
#include "hist.h"
#include "zstd_opt.h"


#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
#define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */
#define ZSTD_MAX_PRICE     (1<<30)

#define ZSTD_PREDEF_THRESHOLD 1024   /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */


/*-*************************************
*  Price functions for optimal parser
***************************************/

#if 0    /* approximation at bit level */
#  define BITCOST_ACCURACY 0
#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
#  define WEIGHT(stat,opt)  ((void)opt, ZSTD_bitWeight(stat))
#elif 0  /* fractional bit accuracy */
#  define BITCOST_ACCURACY 8
#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
#  define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
#else    /* opt==approx, ultra==accurate */
#  define BITCOST_ACCURACY 8
#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
#  define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
#endif

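/* Added note on the price model :
 * prices are expressed in fractional bits, scaled by BITCOST_MULTIPLIER
 * (1/256th of a bit with BITCOST_ACCURACY==8 selected above).
 * For stat+1 in [2^hb, 2^(hb+1)), ZSTD_fracWeight(stat) ~= log2(stat+1) + 1,
 * accurate to within ~0.09 bit. Symbol prices are differences of weights,
 * so the constant +1 cancels : price(sym) ~= WEIGHT(sum) - WEIGHT(freq[sym])
 *                                         ~= log2(sum / freq[sym]).
 * Worked example : sum=2047 => stat=2048, hb=11, weight = 11*256 + 256 = 3072 ;
 * freq=39 => stat=40, hb=5, weight = 5*256 + 320 = 1600 ;
 * price = 3072 - 1600 = 1472, i.e. 5.75 bits (exact : log2(2048/40) ~= 5.68). */
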
MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
{
    return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
}

MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
{
    U32 const stat = rawStat + 1;
    U32 const hb = ZSTD_highbit32(stat);
    U32 const BWeight = hb * BITCOST_MULTIPLIER;
    U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
    U32 const weight = BWeight + FWeight;
    assert(hb + BITCOST_ACCURACY < 31);
    return weight;
}

#if (DEBUGLEVEL>=2)
/* debugging function,
 * @return price in bytes as fractional value
 * for debug messages only */
MEM_STATIC double ZSTD_fCost(U32 price)
{
    return (double)price / (BITCOST_MULTIPLIER*8);
}
#endif

static int ZSTD_compressedLiterals(optState_t const* const optPtr)
{
    return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
}

static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
{
    if (ZSTD_compressedLiterals(optPtr))
        optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
    optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
    optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
    optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
}


/* ZSTD_downscaleStat() :
 * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
 * return the resulting sum of elements */
static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
{
    U32 s, sum=0;
    DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
    assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
    for (s=0; s<lastEltIndex+1; s++) {
        table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
        sum += table[s];
    }
    return sum;
}
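
/* Added example : with ZSTD_FREQ_DIV==4 and malus==0, each count c becomes
 * 1 + (c >> 4), e.g. 100 -> 7 ; litFreq is downscaled with malus==1 (shift 5),
 * so 100 -> 4. Previous-block statistics therefore survive only as a seed,
 * and the new block's own statistics quickly dominate. */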

/* ZSTD_rescaleFreqs() :
 * if first block (detected by optPtr->litLengthSum == 0) : init statistics
 *    take hints from dictionary if there is one
 *    or init from zero, using src for literals stats, or flat 1 for match symbols
 * otherwise downscale existing stats, to be used as seed for next block.
 */
static void
ZSTD_rescaleFreqs(optState_t* const optPtr,
            const BYTE* const src, size_t const srcSize,
                  int const optLevel)
{
    int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
    DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
    optPtr->priceType = zop_dynamic;

    if (optPtr->litLengthSum == 0) {  /* first block : init */
        if (srcSize <= ZSTD_PREDEF_THRESHOLD) {  /* heuristic */
            DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
            optPtr->priceType = zop_predef;
        }

        assert(optPtr->symbolCosts != NULL);
        if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
            /* huffman table presumed generated by dictionary */
            optPtr->priceType = zop_dynamic;

            if (compressedLiterals) {
                unsigned lit;
                assert(optPtr->litFreq != NULL);
                optPtr->litSum = 0;
                for (lit=0; lit<=MaxLit; lit++) {
                    U32 const scaleLog = 11;   /* scale to 2K */
                    U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
                    assert(bitCost <= scaleLog);
                    optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->litSum += optPtr->litFreq[lit];
            }   }

            {   unsigned ll;
                FSE_CState_t llstate;
                FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
                optPtr->litLengthSum = 0;
                for (ll=0; ll<=MaxLL; ll++) {
                    U32 const scaleLog = 10;   /* scale to 1K */
                    U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
                    assert(bitCost < scaleLog);
                    optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->litLengthSum += optPtr->litLengthFreq[ll];
            }   }

            {   unsigned ml;
                FSE_CState_t mlstate;
                FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
                optPtr->matchLengthSum = 0;
                for (ml=0; ml<=MaxML; ml++) {
                    U32 const scaleLog = 10;
                    U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
                    assert(bitCost < scaleLog);
                    optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
            }   }

            {   unsigned of;
                FSE_CState_t ofstate;
                FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
                optPtr->offCodeSum = 0;
                for (of=0; of<=MaxOff; of++) {
                    U32 const scaleLog = 10;
                    U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
                    assert(bitCost < scaleLog);
                    optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
                    optPtr->offCodeSum += optPtr->offCodeFreq[of];
            }   }

        } else {  /* not a dictionary */

            assert(optPtr->litFreq != NULL);
            if (compressedLiterals) {
                unsigned lit = MaxLit;
                HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */
                optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
            }

            {   unsigned ll;
                for (ll=0; ll<=MaxLL; ll++)
                    optPtr->litLengthFreq[ll] = 1;
            }
            optPtr->litLengthSum = MaxLL+1;

            {   unsigned ml;
                for (ml=0; ml<=MaxML; ml++)
                    optPtr->matchLengthFreq[ml] = 1;
            }
            optPtr->matchLengthSum = MaxML+1;

            {   unsigned of;
                for (of=0; of<=MaxOff; of++)
                    optPtr->offCodeFreq[of] = 1;
            }
            optPtr->offCodeSum = MaxOff+1;

        }

    } else {   /* new block : re-use previous statistics, scaled down */

        if (compressedLiterals)
            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
        optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
        optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
        optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
    }

    ZSTD_setBasePrices(optPtr, optLevel);
}

/* ZSTD_rawLiteralsCost() :
 * price of literals (only) in specified segment (whose length can be 0).
 * does not include price of literalLength symbol */
static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
                                const optState_t* const optPtr,
                                int optLevel)
{
    if (litLength == 0) return 0;

    if (!ZSTD_compressedLiterals(optPtr))
        return (litLength << 3) * BITCOST_MULTIPLIER;  /* Uncompressed - 8 bits per literal. */

    if (optPtr->priceType == zop_predef)
        return (litLength*6) * BITCOST_MULTIPLIER;  /* 6 bits per literal - no statistic used */

    /* dynamic statistics */
    {   U32 price = litLength * optPtr->litSumBasePrice;
        U32 u;
        for (u=0; u < litLength; u++) {
            assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice);   /* literal cost should never be negative */
            price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
        }
        return price;
    }
}

/* ZSTD_litLengthPrice() :
 * cost of literalLength symbol */
static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
{
    if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);

    /* dynamic statistics */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        return (LL_bits[llCode] * BITCOST_MULTIPLIER)
             + optPtr->litLengthSumBasePrice
             - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
    }
}

/* ZSTD_getMatchPrice() :
 * Provides the cost of the match part (offset + matchLength) of a sequence
 * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
 * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
FORCE_INLINE_TEMPLATE U32
ZSTD_getMatchPrice(U32 const offset,
                   U32 const matchLength,
             const optState_t* const optPtr,
                   int const optLevel)
{
    U32 price;
    U32 const offCode = ZSTD_highbit32(offset+1);
    U32 const mlBase = matchLength - MINMATCH;
    assert(matchLength >= MINMATCH);

    if (optPtr->priceType == zop_predef)  /* fixed scheme, do not use statistics */
        return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);

    /* dynamic statistics */
    price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
    if ((optLevel<2) /*static*/ && offCode >= 20)
        price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */

    /* match Length */
    {   U32 const mlCode = ZSTD_MLcode(mlBase);
        price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
    }

    price += BITCOST_MULTIPLIER / 5;   /* heuristic : make matches a bit more costly to favor fewer sequences -> faster decompression speed */

    DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
    return price;
}
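
/* Added note : offCode is the log2 bucket of offset+1 ; for example,
 * offset==2048 => offCode = ZSTD_highbit32(2049) = 11, so the offset costs
 * ~11 raw bits plus the statistical cost of offset-code symbol 11.
 * The optLevel<2 handicap above only applies from offCode 20 upward,
 * i.e. offsets of roughly 1 MB and beyond. */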

/* ZSTD_updateStats() :
 * assumption : literals + litLength <= iend */
static void ZSTD_updateStats(optState_t* const optPtr,
                             U32 litLength, const BYTE* literals,
                             U32 offsetCode, U32 matchLength)
{
    /* literals */
    if (ZSTD_compressedLiterals(optPtr)) {
        U32 u;
        for (u=0; u < litLength; u++)
            optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
        optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
    }

    /* literal Length */
    {   U32 const llCode = ZSTD_LLcode(litLength);
        optPtr->litLengthFreq[llCode]++;
        optPtr->litLengthSum++;
    }

    /* match offset code (0-2=>repCode; 3+=>offset+2) */
    {   U32 const offCode = ZSTD_highbit32(offsetCode+1);
        assert(offCode <= MaxOff);
        optPtr->offCodeFreq[offCode]++;
        optPtr->offCodeSum++;
    }

    /* match Length */
    {   U32 const mlBase = matchLength - MINMATCH;
        U32 const mlCode = ZSTD_MLcode(mlBase);
        optPtr->matchLengthFreq[mlCode]++;
        optPtr->matchLengthSum++;
    }
}


/* ZSTD_readMINMATCH() :
 * function safe only for comparisons
 * assumption : memPtr must be at least 4 bytes before end of buffer */
MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
{
    switch (length)
    {
    default :
    case 4 : return MEM_read32(memPtr);
    case 3 : if (MEM_isLittleEndian())
                return MEM_read32(memPtr)<<8;
             else
                return MEM_read32(memPtr)>>8;
    }
}
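
/* Added note : for length==3, a full 4-byte word is read, then shifted by 8 bits
 * so that the 4th byte drops out (it is the top byte of a little-endian read,
 * discarded by <<8, and the bottom byte of a big-endian read, discarded by >>8).
 * Both operands of a comparison go through the same transform, so equality of
 * the transformed values is exactly equality of the first 3 bytes ; the value
 * itself is meaningless, hence "safe only for comparisons". */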


/* Update hashTable3 up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
                                              U32* nextToUpdate3,
                                              const BYTE* const ip)
{
    U32* const hashTable3 = ms->hashTable3;
    U32 const hashLog3 = ms->hashLog3;
    const BYTE* const base = ms->window.base;
    U32 idx = *nextToUpdate3;
    U32 const target = (U32)(ip - base);
    size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
    assert(hashLog3 > 0);

    while(idx < target) {
        hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
        idx++;
    }

    *nextToUpdate3 = target;
    return hashTable3[hash3];
}


/*-*************************************
*  Binary Tree search
***************************************/
/* ZSTD_insertBt1() : add one or multiple positions to tree.
 *  ip : assumed <= iend-8.
 * @return : nb of positions added */
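/* Added overview : the "tree" lives inside ms->chainTable, used as an array of
 * (smaller, larger) child pairs : bt[2*(idx&btMask)] roots the subtree of
 * positions whose suffix sorts lexicographically below position idx, and
 * bt[2*(idx&btMask)+1] the subtree sorting above it. commonLengthSmaller /
 * commonLengthLarger track the prefix length guaranteed shared with every
 * remaining candidate on each side, so each comparison can resume at
 * MIN(commonLengthSmaller, commonLengthLarger) instead of offset 0.
 * The table is cyclic : positions older than curr - btMask are unreachable,
 * hence the btLow checks below. */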
static U32 ZSTD_insertBt1(
                ZSTD_matchState_t* ms,
                const BYTE* const ip, const BYTE* const iend,
                U32 const mls, const int extDict)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32*   const hashTable = ms->hashTable;
    U32    const hashLog = cParams->hashLog;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32*   const bt = ms->chainTable;
    U32    const btLog  = cParams->chainLog - 1;
    U32    const btMask = (1 << btLog) - 1;
    U32 matchIndex = hashTable[h];
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* match;
    const U32 curr = (U32)(ip-base);
    const U32 btLow = btMask >= curr ? 0 : curr - btMask;
    U32* smallerPtr = bt + 2*(curr&btMask);
    U32* largerPtr  = smallerPtr + 1;
    U32 dummy32;   /* to be nullified at the end */
    U32 const windowLow = ms->window.lowLimit;
    U32 matchEndIdx = curr+8+1;
    size_t bestLength = 8;
    U32 nbCompares = 1U << cParams->searchLog;
#ifdef ZSTD_C_PREDICT
    U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
    U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
    predictedSmall += (predictedSmall>0);
    predictedLarge += (predictedLarge>0);
#endif /* ZSTD_C_PREDICT */

    DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);

    assert(ip <= iend-8);   /* required for h calculation */
    hashTable[h] = curr;   /* Update Hash Table */

    assert(windowLow > 0);
    for (; nbCompares && (matchIndex >= windowLow); --nbCompares) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(matchIndex < curr);

#ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog is small (<= 11) */
        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
        if (matchIndex == predictedSmall) {
            /* no need to check length, result known */
            *smallerPtr = matchIndex;
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            predictedSmall = predictPtr[1] + (predictPtr[1]>0);
            continue;
        }
        if (matchIndex == predictedLarge) {
            *largerPtr = matchIndex;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
            predictedLarge = predictPtr[0] + (predictPtr[0]>0);
            continue;
        }
#endif

        if (!extDict || (matchIndex+matchLength >= dictLimit)) {
            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if actually extDict */
            match = base + matchIndex;
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
        }

        if (matchLength > bestLength) {
            bestLength = matchLength;
            if (matchLength > matchEndIdx - matchIndex)
                matchEndIdx = matchIndex + (U32)matchLength;
        }

        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
            break;   /* drop, to guarantee consistency ; misses a bit of compression, but other solutions can corrupt the tree */
        }

        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;
    {   U32 positions = 0;
        if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384));   /* speed optimization */
        assert(matchEndIdx > curr + 8);
        return MAX(positions, matchEndIdx - (curr + 8));
    }
}

FORCE_INLINE_TEMPLATE
void ZSTD_updateTree_internal(
                ZSTD_matchState_t* ms,
                const BYTE* const ip, const BYTE* const iend,
                const U32 mls, const ZSTD_dictMode_e dictMode)
{
    const BYTE* const base = ms->window.base;
    U32 const target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;
    DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u  (dictMode:%u)",
                idx, target, dictMode);

    while(idx < target) {
        U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
        assert(idx < (U32)(idx + forward));
        idx += forward;
    }
    assert((size_t)(ip - base) <= (size_t)(U32)(-1));
    assert((size_t)(iend - base) <= (size_t)(U32)(-1));
    ms->nextToUpdate = target;
}

void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
    ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
}

FORCE_INLINE_TEMPLATE
U32 ZSTD_insertBtAndGetAllMatches (
                    ZSTD_match_t* matches,   /* store result (found matches) in this table (presumed large enough) */
                    ZSTD_matchState_t* ms,
                    U32* nextToUpdate3,
                    const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
                    const U32 rep[ZSTD_REP_NUM],
                    U32 const ll0,   /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
                    const U32 lengthToBeat,
                    U32 const mls /* template */)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
    const BYTE* const base = ms->window.base;
    U32 const curr = (U32)(ip-base);
    U32 const hashLog = cParams->hashLog;
    U32 const minMatch = (mls==3) ? 3 : 4;
    U32* const hashTable = ms->hashTable;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32 matchIndex  = hashTable[h];
    U32* const bt   = ms->chainTable;
    U32 const btLog = cParams->chainLog - 1;
    U32 const btMask= (1U << btLog) - 1;
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const dictBase = ms->window.dictBase;
    U32 const dictLimit = ms->window.dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
    U32 const matchLow = windowLow ? windowLow : 1;
    U32* smallerPtr = bt + 2*(curr&btMask);
    U32* largerPtr  = bt + 2*(curr&btMask) + 1;
    U32 matchEndIdx = curr+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
    U32 dummy32;   /* to be nullified at the end */
    U32 mnum = 0;
    U32 nbCompares = 1U << cParams->searchLog;

    const ZSTD_matchState_t* dms    = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
    const ZSTD_compressionParameters* const dmsCParams =
                                      dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
    const BYTE* const dmsBase       = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
    const BYTE* const dmsEnd        = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
    U32         const dmsHighLimit  = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
    U32         const dmsLowLimit   = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
    U32         const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
    U32         const dmsHashLog    = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
    U32         const dmsBtLog      = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
    U32         const dmsBtMask     = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
    U32         const dmsBtLow      = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;

    size_t bestLength = lengthToBeat-1;
    DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr);

    /* check repCode */
    assert(ll0 <= 1);   /* necessarily 1 or 0 */
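    /* Added note : with ll0==0 the candidates are rep[0..2] (repCode 0..2) ;
     * with ll0==1, rep[0] is skipped and the virtual candidate rep[0]-1 is
     * tested as repCode==ZSTD_REP_NUM. The stored offCode is repCode - ll0,
     * matching the repcode numbering convention applied when litLength==0. */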
    {   U32 const lastR = ZSTD_REP_NUM + ll0;
        U32 repCode;
        for (repCode = ll0; repCode < lastR; repCode++) {
            U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
            U32 const repIndex = curr - repOffset;
            U32 repLen = 0;
            assert(curr >= dictLimit);
            if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) {  /* equivalent to `curr > repIndex >= dictLimit` */
                /* We must validate the repcode offset because when we're using a dictionary the
                 * valid offset range shrinks when the dictionary goes out of bounds.
                 */
                if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
                    repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
                }
            } else {  /* repIndex < dictLimit || repIndex >= curr */
                const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
                                             dmsBase + repIndex - dmsIndexDelta :
                                             dictBase + repIndex;
                assert(curr >= windowLow);
                if ( dictMode == ZSTD_extDict
                  && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow)  /* equivalent to `curr > repIndex >= windowLow` */
                     & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
                }
                if (dictMode == ZSTD_dictMatchState
                  && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta))  /* equivalent to `curr > repIndex >= dmsLowLimit` */
                     & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
            }   }
            /* save longer solution */
            if (repLen > bestLength) {
                DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
                            repCode, ll0, repOffset, repLen);
                bestLength = repLen;
                matches[mnum].off = repCode - ll0;
                matches[mnum].len = (U32)repLen;
                mnum++;
                if ( (repLen > sufficient_len)
                   | (ip+repLen == iLimit) ) {  /* best possible */
                    return mnum;
    }   }   }   }

    /* HC3 match finder */
    if ((mls == 3) /*static*/ && (bestLength < mls)) {
        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
        if ((matchIndex3 >= matchLow)
          & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
            size_t mlen;
            if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
                const BYTE* const match = base + matchIndex3;
                mlen = ZSTD_count(ip, match, iLimit);
            } else {
                const BYTE* const match = dictBase + matchIndex3;
                mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
            }

            /* save best solution */
            if (mlen >= mls /* == 3 > bestLength */) {
                DEBUGLOG(8, "found small match with hlog3, of length %u",
                            (U32)mlen);
                bestLength = mlen;
                assert(curr > matchIndex3);
                assert(mnum==0);  /* no prior solution */
                matches[0].off = (curr - matchIndex3) + ZSTD_REP_MOVE;
                matches[0].len = (U32)mlen;
                mnum = 1;
                if ( (mlen > sufficient_len) |
                     (ip+mlen == iLimit) ) {  /* best possible length */
                    ms->nextToUpdate = curr+1;  /* skip insertion */
                    return 1;
        }   }   }
        /* no dictMatchState lookup: dicts don't have a populated HC3 table */
    }

    hashTable[h] = curr;   /* Update Hash Table */

    for (; nbCompares && (matchIndex >= matchLow); --nbCompares) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        const BYTE* match;
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(curr > matchIndex);

        if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
            assert(matchIndex+matchLength >= dictLimit);  /* ensure the condition is correct when !extDict */
            match = base + matchIndex;
            if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
        } else {
            match = dictBase + matchIndex;
            assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* prepare for match[matchLength] read */
        }

        if (matchLength > bestLength) {
            DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
                    (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
            assert(matchEndIdx > matchIndex);
            if (matchLength > matchEndIdx - matchIndex)
                matchEndIdx = matchIndex + (U32)matchLength;
            bestLength = matchLength;
            matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
            matches[mnum].len = (U32)matchLength;
            mnum++;
            if ( (matchLength > ZSTD_OPT_NUM)
               | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
                if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
                break; /* drop, to preserve bt consistency (miss a little bit of compression) */
            }
        }

        if (match[matchLength] < ip[matchLength]) {
            /* match smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            smallerPtr = nextPtr+1;               /* new candidate => larger than match, which was smaller than current */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous, closer to current */
        } else {
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;

    assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
    if (dictMode == ZSTD_dictMatchState && nbCompares) {
        size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
        U32 dictMatchIndex = dms->hashTable[dmsH];
        const U32* const dmsBt = dms->chainTable;
        commonLengthSmaller = commonLengthLarger = 0;
        for (; nbCompares && (dictMatchIndex > dmsLowLimit); --nbCompares) {
            const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
            const BYTE* match = dmsBase + dictMatchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
            if (dictMatchIndex+matchLength >= dmsHighLimit)
                match = base + dictMatchIndex + dmsIndexDelta;   /* to prepare for next usage of match[matchLength] */

            if (matchLength > bestLength) {
                matchIndex = dictMatchIndex + dmsIndexDelta;
                DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
                        (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
                if (matchLength > matchEndIdx - matchIndex)
                    matchEndIdx = matchIndex + (U32)matchLength;
                bestLength = matchLength;
                matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
                matches[mnum].len = (U32)matchLength;
                mnum++;
                if ( (matchLength > ZSTD_OPT_NUM)
                   | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
                }
            }

            if (dictMatchIndex <= dmsBtLow) { break; }   /* beyond tree size, stop the search */
            if (match[matchLength] < ip[matchLength]) {
                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
                dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            } else {
                /* match is larger than current */
                commonLengthLarger = matchLength;
                dictMatchIndex = nextPtr[0];
            }
        }
    }

    assert(matchEndIdx > curr+8);
    ms->nextToUpdate = matchEndIdx - 8;  /* skip repetitive patterns */
    return mnum;
}


FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
                        ZSTD_match_t* matches,   /* store result (match found, increasing size) in this table */
                        ZSTD_matchState_t* ms,
                        U32* nextToUpdate3,
                        const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
                        const U32 rep[ZSTD_REP_NUM],
                        U32 const ll0,
                        U32 const lengthToBeat)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32 const matchLengthSearch = cParams->minMatch;
    DEBUGLOG(8, "ZSTD_BtGetAllMatches");
    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
    switch(matchLengthSearch)
    {
    case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3);
    default :
    case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4);
    case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5);
    case 7 :
    case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6);
    }
}

/* ***********************
*  LDM helper functions  *
*************************/

/* Struct containing info needed to make decision about ldm inclusion */
typedef struct {
    rawSeqStore_t seqStore;         /* External match candidates store for this block */
    U32 startPosInBlock;            /* Start position of the current match candidate */
    U32 endPosInBlock;              /* End position of the current match candidate */
    U32 offset;                     /* Offset of the match candidate */
} ZSTD_optLdm_t;

/* ZSTD_optLdm_skipRawSeqStoreBytes():
 * Moves forward in rawSeqStore by nbBytes, which will update the fields 'pos' and 'posInSequence'.
 */
static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
        if (currPos >= currSeq.litLength + currSeq.matchLength) {
            currPos -= currSeq.litLength + currSeq.matchLength;
            rawSeqStore->pos++;
        } else {
            rawSeqStore->posInSequence = currPos;
            break;
        }
    }
    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
        rawSeqStore->posInSequence = 0;
    }
}
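
/* Added example : with seq[0] = {litLength:5, matchLength:10} and
 * seq[1] = {litLength:3, matchLength:7}, starting from pos==0, posInSequence==0 :
 * skipping 12 bytes gives 12 < 15, so pos stays 0 and posInSequence becomes 12 ;
 * skipping 20 bytes gives 20 >= 15, so pos advances to 1 and posInSequence
 * becomes 20 - 15 = 5, i.e. inside seq[1]. posInSequence is reset to 0 once
 * the store is fully consumed. */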

/* ZSTD_opt_getNextMatchAndUpdateSeqStore():
 * Calculates the beginning and end of the next match in the current block.
 * Updates 'pos' and 'posInSequence' of the ldmSeqStore.
 */
static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
                                                   U32 blockBytesRemaining) {
    rawSeq currSeq;
    U32 currBlockEndPos;
    U32 literalsBytesRemaining;
    U32 matchBytesRemaining;

    /* Setting match end position to MAX to ensure we never use an LDM during this block */
    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
        optLdm->startPosInBlock = UINT_MAX;
        optLdm->endPosInBlock = UINT_MAX;
        return;
    }
    /* Calculate appropriate bytes left in matchLength and litLength after adjusting
       based on ldmSeqStore->posInSequence */
    currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
    assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
    currBlockEndPos = currPosInBlock + blockBytesRemaining;
    literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ?
            currSeq.litLength - (U32)optLdm->seqStore.posInSequence :
            0;
    matchBytesRemaining = (literalsBytesRemaining == 0) ?
            currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) :
            currSeq.matchLength;

    /* If there are more literal bytes than bytes remaining in block, no ldm is possible */
    if (literalsBytesRemaining >= blockBytesRemaining) {
        optLdm->startPosInBlock = UINT_MAX;
        optLdm->endPosInBlock = UINT_MAX;
        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining);
        return;
    }

    /* Matches may be < MINMATCH by this process. In that case, we will reject them
       when we are deciding whether or not to add the ldm */
    optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
    optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
    optLdm->offset = currSeq.offset;

    if (optLdm->endPosInBlock > currBlockEndPos) {
        /* Match ends after the block ends, we can't use the whole match */
        optLdm->endPosInBlock = currBlockEndPos;
        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock);
    } else {
        /* Consume nb of bytes equal to size of sequence left */
        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining);
    }
}

/* ZSTD_optLdm_maybeAddMatch():
 * Adds a match if it's long enough, based on its 'startPosInBlock' and
 * 'endPosInBlock', into 'matches'. Maintains the correct ordering of 'matches'
 */
static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
                                      ZSTD_optLdm_t* optLdm, U32 currPosInBlock) {
    U32 posDiff = currPosInBlock - optLdm->startPosInBlock;
    /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
    U32 candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
    U32 candidateOffCode = optLdm->offset + ZSTD_REP_MOVE;

    /* Ensure that current block position is not outside of the match */
    if (currPosInBlock < optLdm->startPosInBlock
      || currPosInBlock >= optLdm->endPosInBlock
      || candidateMatchLength < MINMATCH) {
        return;
    }

    if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
        DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
                 candidateOffCode, candidateMatchLength, currPosInBlock);
        matches[*nbMatches].len = candidateMatchLength;
        matches[*nbMatches].off = candidateOffCode;
        (*nbMatches)++;
    }
}

/* ZSTD_optLdm_processMatchCandidate():
 * Wrapper function to update ldm seq store and call ldm functions as necessary.
 */
static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches,
                                              U32 currPosInBlock, U32 remainingBytes) {
    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
        return;
    }

    if (currPosInBlock >= optLdm->endPosInBlock) {
        if (currPosInBlock > optLdm->endPosInBlock) {
            /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily
             * at the end of a match from the ldm seq store, and will often be some bytes
             * over beyond matchEndPosInBlock. As such, we need to correct for these "overshoots"
             */
            U32 posOvershoot = currPosInBlock - optLdm->endPosInBlock;
            ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
        }
        ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
    }
    ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
}

/*-*******************************
*  Optimal parser
*********************************/


static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
{
    return sol.litlen + sol.mlen;
}

#if 0 /* debug */

static void
listStats(const U32* table, int lastEltID)
{
    int const nbElts = lastEltID + 1;
    int enb;
    for (enb=0; enb < nbElts; enb++) {
        (void)table;
        /* RAWLOG(2, "%3i:%3i,  ", enb, table[enb]); */
        RAWLOG(2, "%4i,", table[enb]);
    }
    RAWLOG(2, " \n");
}

#endif

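/* Added overview : opt[] below is a dynamic-programming table indexed by
 * relative position (rPos) within the current series : opt[rPos] holds the
 * cheapest known way to reach ip+rPos, i.e. its price, the last step taken
 * (mlen==0 means a literal run of length litlen, otherwise a match of length
 * mlen with offset code off), and the repcode history at that point.
 * The forward loop relaxes prices like a shortest-path search over a DAG of
 * literal and match edges ; the backward pass at _shortestPath then follows
 * the mlen/litlen links from the end of the series back to rPos 0 and emits
 * the selected sequences in order. */
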
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
                               seqStore_t* seqStore,
                               U32 rep[ZSTD_REP_NUM],
                         const void* src, size_t srcSize,
                         const int optLevel,
                         const ZSTD_dictMode_e dictMode)
{
    optState_t* const optStatePtr = &ms->opt;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base;
    const BYTE* const prefixStart = base + ms->window.dictLimit;
    const ZSTD_compressionParameters* const cParams = &ms->cParams;

    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
    U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
    U32 nextToUpdate3 = ms->nextToUpdate;

    ZSTD_optimal_t* const opt = optStatePtr->priceTable;
    ZSTD_match_t* const matches = optStatePtr->matchTable;
    ZSTD_optimal_t lastSequence;
    ZSTD_optLdm_t optLdm;

    optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
    optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
    ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
                (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
    assert(optLevel <= 2);
    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
    ip += (ip==prefixStart);

    /* Match Loop */
    while (ip < ilimit) {
        U32 cur, last_pos = 0;

        /* find first match */
        {   U32 const litlen = (U32)(ip - anchor);
            U32 const ll0 = !litlen;
            U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);
            ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
                                              (U32)(ip-istart), (U32)(iend - ip));
            if (!nbMatches) { ip++; continue; }

            /* initialize opt[0] */
            { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
            opt[0].mlen = 0;  /* means is_a_literal */
            opt[0].litlen = litlen;
            /* We don't need to include the actual price of the literals because
             * it is static for the duration of the forward pass, and is included
             * in every price. We include the literal length to avoid negative
             * prices when we subtract the previous literal length.
             */
            opt[0].price = ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);

            /* large match -> immediate encoding */
            {   U32 const maxML = matches[nbMatches-1].len;
                U32 const maxOffset = matches[nbMatches-1].off;
                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
                            nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));

                if (maxML > sufficient_len) {
                    lastSequence.litlen = litlen;
                    lastSequence.mlen = maxML;
                    lastSequence.off = maxOffset;
                    DEBUGLOG(6, "large match (%u>%u), immediate encoding",
                                maxML, sufficient_len);
                    cur = 0;
                    last_pos = ZSTD_totalLen(lastSequence);
                    goto _shortestPath;
            }   }

            /* set prices for first matches starting position == 0 */
            {   U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
                U32 pos;
                U32 matchNb;
                for (pos = 1; pos < minMatch; pos++) {
                    opt[pos].price = ZSTD_MAX_PRICE;   /* mlen, litlen and price will be fixed during forward scanning */
                }
                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
                    U32 const offset = matches[matchNb].off;
                    U32 const end = matches[matchNb].len;
                    for ( ; pos <= end ; pos++ ) {
                        U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
                        U32 const sequencePrice = literalsPrice + matchPrice;
                        DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
                                    pos, ZSTD_fCost(sequencePrice));
                        opt[pos].mlen = pos;
                        opt[pos].off = offset;
                        opt[pos].litlen = litlen;
                        opt[pos].price = sequencePrice;
                }   }
                last_pos = pos-1;
            }
        }

        /* check further positions */
        for (cur = 1; cur <= last_pos; cur++) {
            const BYTE* const inr = ip + cur;
            assert(cur < ZSTD_OPT_NUM);
            DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)

            /* Fix current position with one literal if cheaper */
            {   U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
                int const price = opt[cur-1].price
                                + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
                                + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
                                - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
                assert(price < 1000000000); /* overflow check */
                if (price <= opt[cur].price) {
                    DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
                                opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
                    opt[cur].mlen = 0;
                    opt[cur].off = 0;
                    opt[cur].litlen = litlen;
                    opt[cur].price = price;
                } else {
                    DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
                                opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
                }
            }

            /* Set the repcodes of the current position. We must do it here
             * because we rely on the repcodes of the second-to-last sequence
             * being correct to set the next chunk's repcodes during the
             * backward traversal.
             */
            ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
            assert(cur >= opt[cur].mlen);
            if (opt[cur].mlen != 0) {
                U32 const prev = cur - opt[cur].mlen;
                repcodes_t newReps = ZSTD_updateRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
                ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
            } else {
                ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
            }

            /* last match must start at a minimum distance of 8 from iend */
1088            if (inr > ilimit) continue;
1089
1090            if (cur == last_pos) break;
1091
1092            if ( (optLevel==0) /*static_test*/
1093              && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
1094                DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
1095                continue;  /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
1096            }
1097
1098            {   U32 const ll0 = (opt[cur].mlen != 0);
1099                U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
1100                U32 const previousPrice = opt[cur].price;
1101                U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
1102                U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);
1103                U32 matchNb;
1104
1105                ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
1106                                                  (U32)(inr-istart), (U32)(iend-inr));
1107
1108                if (!nbMatches) {
1109                    DEBUGLOG(7, "rPos:%u : no match found", cur);
1110                    continue;
1111                }
1112
1113                {   U32 const maxML = matches[nbMatches-1].len;
1114                    DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
1115                                inr-istart, cur, nbMatches, maxML);
1116
1117                    if ( (maxML > sufficient_len)
1118                      || (cur + maxML >= ZSTD_OPT_NUM) ) {
1119                        lastSequence.mlen = maxML;
1120                        lastSequence.off = matches[nbMatches-1].off;
1121                        lastSequence.litlen = litlen;
1122                        cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0;  /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
1123                        last_pos = cur + ZSTD_totalLen(lastSequence);
1124                        if (cur > ZSTD_OPT_NUM) cur = 0;   /* underflow => first match */
1125                        goto _shortestPath;
1126                }   }
1127
1128                /* set prices using matches found at position == cur */
1129                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
1130                    U32 const offset = matches[matchNb].off;
1131                    U32 const lastML = matches[matchNb].len;
1132                    U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
1133                    U32 mlen;
1134
1135                    DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
1136                                matchNb, matches[matchNb].off, lastML, litlen);
1137
1138                    for (mlen = lastML; mlen >= startML; mlen--) {  /* scan downward */
1139                        U32 const pos = cur + mlen;
1140                        int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
1141
1142                        if ((pos > last_pos) || (price < opt[pos].price)) {
1143                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
1144                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
1145                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }   /* fill empty positions */
1146                            opt[pos].mlen = mlen;
1147                            opt[pos].off = offset;
1148                            opt[pos].litlen = litlen;
1149                            opt[pos].price = price;
1150                        } else {
1151                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
1152                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
1153                            if (optLevel==0) break;  /* early update abort; ~+10% speed for ~-0.01 ratio loss */
1154                        }
1155            }   }   }
1156        }  /* for (cur = 1; cur <= last_pos; cur++) */
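            /* Editor's note : the loop above is the forward pass of a shortest-path
             * search : opt[pos].price always holds the cheapest known cost to reach
             * position pos. One relaxation step, sketched with hypothetical values
             * (curPos, mlen2, offCode are illustrative, not from the source) : */
    #if 0   /* illustration only, not part of the build */
            {   U32 const curPos = 10, mlen2 = 4, offCode = 7;   /* hypothetical */
                U32 const pos = curPos + mlen2;
                int const candidate = opt[curPos].price
                                    + ZSTD_litLengthPrice(0, optStatePtr, optLevel)
                                    + ZSTD_getMatchPrice(offCode, mlen2, optStatePtr, optLevel);
                if (candidate < opt[pos].price) {   /* keep only the cheaper path */
                    opt[pos].mlen = mlen2;
                    opt[pos].off = offCode;
                    opt[pos].litlen = 0;            /* assuming no pending literals */
                    opt[pos].price = candidate;
                }
            }
    #endif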
1157
1158        lastSequence = opt[last_pos];
1159        cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0;  /* single sequence, and it starts before `ip` */
1160        assert(cur < ZSTD_OPT_NUM);  /* guard against overflow */
1161
1162_shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
1163        assert(opt[0].mlen == 0);
1164
1165        /* Set the next chunk's repcodes based on the repcodes of the beginning
1166         * of the last match, and the last sequence. This avoids us having to
1167         * update them while traversing the sequences.
1168         */
1169        if (lastSequence.mlen != 0) {
1170            repcodes_t reps = ZSTD_updateRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
1171            ZSTD_memcpy(rep, &reps, sizeof(reps));
1172        } else {
1173            ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
1174        }
1175
1176        {   U32 const storeEnd = cur + 1;
1177            U32 storeStart = storeEnd;
1178            U32 seqPos = cur;
1179
1180            DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
1181                        last_pos, cur); (void)last_pos;
1182            assert(storeEnd < ZSTD_OPT_NUM);
1183            DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
1184                        storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
1185            opt[storeEnd] = lastSequence;
1186            while (seqPos > 0) {
1187                U32 const backDist = ZSTD_totalLen(opt[seqPos]);
1188                storeStart--;
1189                DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
1190                            seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
1191                opt[storeStart] = opt[seqPos];
1192                seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
1193            }
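                /* Editor's illustration (hypothetical positions) : opt[p] describes
                 * the sequence *ending* at relative position p, so stepping back by
                 * ZSTD_totalLen() hops from one sequence end to the previous one,
                 * e.g. seqPos : 13 -> 5 -> 0. Writing each hop into opt[--storeStart]
                 * reverses the chain, so the forward scan below emits the sequences
                 * in source order. */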
1194
1195            /* save sequences */
1196            DEBUGLOG(6, "sending selected sequences into seqStore");
1197            {   U32 storePos;
1198                for (storePos=storeStart; storePos <= storeEnd; storePos++) {
1199                    U32 const llen = opt[storePos].litlen;
1200                    U32 const mlen = opt[storePos].mlen;
1201                    U32 const offCode = opt[storePos].off;
1202                    U32 const advance = llen + mlen;
1203                    DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
1204                                anchor - istart, (unsigned)llen, (unsigned)mlen);
1205
1206                    if (mlen==0) {  /* only literals => must be last "sequence", actually starting a new stream of sequences */
1207                        assert(storePos == storeEnd);   /* must be last sequence */
1208                        ip = anchor + llen;     /* last "sequence" is a bunch of literals => don't progress anchor */
1209                        continue;   /* will finish */
1210                    }
1211
1212                    assert(anchor + llen <= iend);
1213                    ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
1214                    ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
1215                    anchor += advance;
1216                    ip = anchor;
1217            }   }
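                /* Editor's note on ZSTD_storeSeq() above : the match length is
                 * recorded biased by MINMATCH, since every stored match is at
                 * least MINMATCH bytes long ; the llen literals starting at
                 * anchor are copied into the seqStore alongside the sequence. */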
1218            ZSTD_setBasePrices(optStatePtr, optLevel);
1219        }
1220    }   /* while (ip < ilimit) */
1221
1222    /* Return the last literals size */
1223    return (size_t)(iend - anchor);
1224}
1225
1226
1227size_t ZSTD_compressBlock_btopt(
1228        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1229        const void* src, size_t srcSize)
1230{
1231    DEBUGLOG(5, "ZSTD_compressBlock_btopt");
1232    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
1233}
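    
    /* Editor's note : the ZSTD_compressBlock_bt* entry points in this file are
     * thin wrappers around ZSTD_compressBlock_opt_generic(), varying only the
     * optLevel argument (0 for btopt, 2 for btultra) and the dictMode. */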
1234
1235
1236/* used in 2-pass strategy */
1237static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
1238{
1239    U32 s, sum=0;
1240    assert(ZSTD_FREQ_DIV+bonus >= 0);
1241    for (s=0; s<lastEltIndex+1; s++) {
1242        table[s] <<= ZSTD_FREQ_DIV+bonus;
1243        table[s]--;
1244        sum += table[s];
1245    }
1246    return sum;
1247}
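    
    /* Editor's worked example for ZSTD_upscaleStat() (hypothetical counts) :
     * with ZSTD_FREQ_DIV==4 and bonus==0, each count c becomes (c<<4)-1. */
    #if 0   /* illustration only, not part of the build */
    static void ZSTD_exampleUpscale(void)   /* hypothetical helper */
    {
        unsigned freq[3] = { 3, 1, 12 };
        U32 const sum = ZSTD_upscaleStat(freq, 2, 0);
        assert(freq[0] == 47 && freq[1] == 15 && freq[2] == 191);
        assert(sum == 47 + 15 + 191);
    }
    #endif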
1248
1249/* used in 2-pass strategy */
1250MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
1251{
1252    if (ZSTD_compressedLiterals(optPtr))
1253        optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
1254    optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
1255    optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
1256    optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
1257}
1258
1259/* ZSTD_initStats_ultra():
1260 * makes a first compression pass, just to seed statistics with more accurate starting values.
1261 * Only works on the first block, with no dictionary and no ldm.
1262 * This function cannot fail, hence its contract must be respected.
1263 */
1264static void
1265ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
1266                     seqStore_t* seqStore,
1267                     U32 rep[ZSTD_REP_NUM],
1268               const void* src, size_t srcSize)
1269{
1270    U32 tmpRep[ZSTD_REP_NUM];  /* updated rep codes will sink here, to be discarded */
1271    ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));
1272
1273    DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
1274    assert(ms->opt.litLengthSum == 0);    /* first block */
1275    assert(seqStore->sequences == seqStore->sequencesStart);   /* no ldm */
1276    assert(ms->window.dictLimit == ms->window.lowLimit);   /* no dictionary */
1277    assert(ms->window.dictLimit - ms->nextToUpdate <= 1);  /* no prefix (note: intentional overflow, defined as two's complement) */
1278
1279    ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);   /* generate stats into ms->opt */
1280
1281    /* invalidate first scan from history */
1282    ZSTD_resetSeqStore(seqStore);
1283    ms->window.base -= srcSize;
1284    ms->window.dictLimit += (U32)srcSize;
1285    ms->window.lowLimit = ms->window.dictLimit;
1286    ms->nextToUpdate = ms->window.dictLimit;
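        /* Editor's illustration (hypothetical indices, assuming the block starts a
         * frame with base==src and srcSize==1000) : pass 1 indexed src as [0..999] ;
         * after base -= 1000, the same bytes re-index as [1000..1999], while
         * dictLimit = lowLimit = nextToUpdate = 1000 put everything below out of
         * window, so pass 2 sees a fresh frame and inherits only the statistics. */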
1287
1288    /* reinforce the weight of the collected statistics */
1289    ZSTD_upscaleStats(&ms->opt);
1290}
1291
1292size_t ZSTD_compressBlock_btultra(
1293        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1294        const void* src, size_t srcSize)
1295{
1296    DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
1297    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
1298}
1299
1300size_t ZSTD_compressBlock_btultra2(
1301        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1302        const void* src, size_t srcSize)
1303{
1304    U32 const curr = (U32)((const BYTE*)src - ms->window.base);
1305    DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
1306
1307    /* 2-pass strategy:
1308     * this strategy makes a first pass over the first block to collect statistics,
1309     * in order to seed the next pass with them.
1310     * After the 1st pass, the function forgets everything it matched, and compresses the block again.
1311     * Consequently, this can only work if no data has been previously loaded into the tables,
1312     * i.e. no dictionary, no prefix, no ldm preprocessing.
1313     * The compression ratio gain is generally small (~0.5% on the first block),
1314     * while the cost is 2x the cpu time on the first block. */
1315    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
1316    if ( (ms->opt.litLengthSum==0)   /* first block */
1317      && (seqStore->sequences == seqStore->sequencesStart)  /* no ldm */
1318      && (ms->window.dictLimit == ms->window.lowLimit)   /* no dictionary */
1319      && (curr == ms->window.dictLimit)   /* start of frame, nothing already loaded nor skipped */
1320      && (srcSize > ZSTD_PREDEF_THRESHOLD)
1321      ) {
1322        ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
1323    }
1324
1325    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
1326}
1327
1328size_t ZSTD_compressBlock_btopt_dictMatchState(
1329        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1330        const void* src, size_t srcSize)
1331{
1332    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
1333}
1334
1335size_t ZSTD_compressBlock_btultra_dictMatchState(
1336        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1337        const void* src, size_t srcSize)
1338{
1339    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
1340}
1341
1342size_t ZSTD_compressBlock_btopt_extDict(
1343        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1344        const void* src, size_t srcSize)
1345{
1346    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
1347}
1348
1349size_t ZSTD_compressBlock_btultra_extDict(
1350        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
1351        const void* src, size_t srcSize)
1352{
1353    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
1354}
1355
1356/* note : there is no btultra2 variant for extDict nor dictMatchState,
1357 * because btultra2 is not meant to work with dictionaries
1358 * and only applies to the first block (no prefix) */
1359