huf_decompress.c

/* ******************************************************************
 * huff0 huffman decoder,
 * part of Finite State Entropy library
 * Copyright (c) Yann Collet, Facebook, Inc.
 *
 * You can contact the author at :
 * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/* **************************************************************
*  Dependencies
****************************************************************/
#include "../common/zstd_deps.h"  /* ZSTD_memcpy, ZSTD_memset */
#include "../common/compiler.h"
#include "../common/bitstream.h"  /* BIT_* */
#include "../common/fse.h"        /* to compress headers */
#define HUF_STATIC_LINKING_ONLY
#include "../common/huf.h"
#include "../common/error_private.h"
#include "../common/zstd_internal.h"

/* **************************************************************
*  Constants
****************************************************************/

#define HUF_DECODER_FAST_TABLELOG 11
/* **************************************************************
*  Macros
****************************************************************/

/* These two optional macros force the use of one of the two Huffman
 * decompression implementations. You can't force both directions
 * at the same time.
 */
#if defined(HUF_FORCE_DECOMPRESS_X1) && \
    defined(HUF_FORCE_DECOMPRESS_X2)
#error "Cannot force the use of the X1 and X2 decoders at the same time!"
#endif

#if ZSTD_ENABLE_ASM_X86_64_BMI2 && DYNAMIC_BMI2
# define HUF_ASM_X86_64_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE
#else
# define HUF_ASM_X86_64_BMI2_ATTRS
#endif

#define HUF_EXTERN_C
#define HUF_ASM_DECL HUF_EXTERN_C

#if DYNAMIC_BMI2 || (ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__))
# define HUF_NEED_BMI2_FUNCTION 1
#else
# define HUF_NEED_BMI2_FUNCTION 0
#endif

#if !(ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__))
# define HUF_NEED_DEFAULT_FUNCTION 1
#else
# define HUF_NEED_DEFAULT_FUNCTION 0
#endif

/* **************************************************************
*  Error Management
****************************************************************/
#define HUF_isError ERR_isError

/* **************************************************************
*  Byte alignment for workSpace management
****************************************************************/
#define HUF_ALIGN(x, a)         HUF_ALIGN_MASK((x), (a) - 1)
#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
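/* HUF_ALIGN rounds x up to the next multiple of a power-of-two alignment `a`,
 * e.g. HUF_ALIGN(13, 8) == 16 and HUF_ALIGN(16, 8) == 16. */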
/* **************************************************************
*  BMI2 Variant Wrappers
****************************************************************/
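/* HUF_DGEN(fn) instantiates fn from an existing fn##_body : when DYNAMIC_BMI2
 * is set, it emits a default copy and a BMI2-compiled copy of the body plus a
 * dispatcher that selects between them at runtime via the bmi2 flag;
 * otherwise it emits a single wrapper that ignores the flag. */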
#if DYNAMIC_BMI2

#define HUF_DGEN(fn)                                                        \
                                                                            \
    static size_t fn##_default(                                             \
                  void* dst,  size_t dstSize,                               \
            const void* cSrc, size_t cSrcSize,                              \
            const HUF_DTable* DTable)                                       \
    {                                                                       \
        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
    }                                                                       \
                                                                            \
    static BMI2_TARGET_ATTRIBUTE size_t fn##_bmi2(                          \
                  void* dst,  size_t dstSize,                               \
            const void* cSrc, size_t cSrcSize,                              \
            const HUF_DTable* DTable)                                       \
    {                                                                       \
        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
    }                                                                       \
                                                                            \
    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
    {                                                                       \
        if (bmi2) {                                                         \
            return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);         \
        }                                                                   \
        return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable);          \
    }

#else

#define HUF_DGEN(fn)                                                        \
    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
    {                                                                       \
        (void)bmi2;                                                         \
        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
    }

#endif
/*-***************************/
/*  generic DTableDesc       */
/*-***************************/
typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
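/* The descriptor mirrors the first word of a HUF_DTable : maxTableLog records
 * the table's capacity, tableType selects the decoder (0 = single-symbol X1,
 * 1 = double-symbol X2), and tableLog is the log of the table actually built. */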
static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
{
    DTableDesc dtd;
    ZSTD_memcpy(&dtd, table, sizeof(dtd));
    return dtd;
}

#if ZSTD_ENABLE_ASM_X86_64_BMI2

static size_t HUF_initDStream(BYTE const* ip) {
    BYTE const lastByte = ip[7];
    size_t const bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
    size_t const value = MEM_readLEST(ip) | 1;
    assert(bitsConsumed <= 8);
    return value << bitsConsumed;
}
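/* Example : if the last byte is 0x80, only the end-of-stream marker bit
 * occupies it, so bitsConsumed == 1 and the shift discards exactly that bit.
 * The |1 plants a guard bit at the very bottom : once the container has been
 * shifted left N bits in total, CountTrailingZeros(bits[]) reports N bits
 * consumed. */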
typedef struct {
    BYTE const* ip[4];
    BYTE* op[4];
    U64 bits[4];
    void const* dt;
    BYTE const* ilimit;
    BYTE* oend;
    BYTE const* iend[4];
} HUF_DecompressAsmArgs;
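/* 4-stream framing, as read below : a 6-byte jump table stores the
 * little-endian 16-bit sizes of streams 1-3; the four streams follow back to
 * back, and stream 4's size is deduced from srcSize. */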
/*
 * Initializes args for the asm decoding loop.
 * @returns 0 on success
 *          1 if the fallback implementation should be used.
 *          Or an error code on failure.
 */
static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable)
{
    void const* dt = DTable + 1;
    U32 const dtLog = HUF_getDTableDesc(DTable).tableLog;

    const BYTE* const ilimit = (const BYTE*)src + 6 + 8;

    BYTE* const oend = (BYTE*)dst + dstSize;

    /* The following condition is false on the x32 ABI,
     * but HUF_asm is not compatible with that ABI anyway. */
    if (!(MEM_isLittleEndian() && !MEM_32bits())) return 1;

    /* strict minimum : jump table + 1 byte per stream */
    if (srcSize < 10)
        return ERROR(corruption_detected);

    /* Must have at least 8 bytes per stream because we don't handle initializing smaller bit containers.
     * If the table log is not correct at this point, fall back to the old decoder.
     * On small inputs we don't have enough data to trigger the fast loop, so use the old decoder.
     */
    if (dtLog != HUF_DECODER_FAST_TABLELOG)
        return 1;

    /* Read the jump table. */
    {
        const BYTE* const istart = (const BYTE*)src;
        size_t const length1 = MEM_readLE16(istart);
        size_t const length2 = MEM_readLE16(istart+2);
        size_t const length3 = MEM_readLE16(istart+4);
        size_t const length4 = srcSize - (length1 + length2 + length3 + 6);
        args->iend[0] = istart + 6;  /* jumpTable */
        args->iend[1] = args->iend[0] + length1;
        args->iend[2] = args->iend[1] + length2;
        args->iend[3] = args->iend[2] + length3;

        /* HUF_initDStream() requires this, and inputs this small
         * won't benefit from the ASM loop anyway.
         * length1 must be >= 16 so that ip[0] >= ilimit before the loop
         * starts.
         */
        if (length1 < 16 || length2 < 8 || length3 < 8 || length4 < 8)
            return 1;
        if (length4 > srcSize) return ERROR(corruption_detected);  /* overflow */
    }

    /* ip[] contains the position that is currently loaded into bits[]. */
    args->ip[0] = args->iend[1] - sizeof(U64);
    args->ip[1] = args->iend[2] - sizeof(U64);
    args->ip[2] = args->iend[3] - sizeof(U64);
    args->ip[3] = (BYTE const*)src + srcSize - sizeof(U64);

    /* op[] contains the output pointers. */
    args->op[0] = (BYTE*)dst;
    args->op[1] = args->op[0] + (dstSize+3)/4;
    args->op[2] = args->op[1] + (dstSize+3)/4;
    args->op[3] = args->op[2] + (dstSize+3)/4;

    /* No point calling the ASM loop for tiny outputs. */
    if (args->op[3] >= oend)
        return 1;

    /* bits[] is the bit container.
     * It is read from the MSB down to the LSB.
     * It is shifted left as it is read, and zeros are
     * shifted in. After the lowest valid bit a 1 is
     * set, so that CountTrailingZeros(bits[]) can be used
     * to count how many bits we've consumed.
     */
    args->bits[0] = HUF_initDStream(args->ip[0]);
    args->bits[1] = HUF_initDStream(args->ip[1]);
    args->bits[2] = HUF_initDStream(args->ip[2]);
    args->bits[3] = HUF_initDStream(args->ip[3]);

    /* If ip[] >= ilimit, it is guaranteed to be safe to
     * reload bits[]. It may be beyond its section, but is
     * guaranteed to be valid (>= istart).
     */
    args->ilimit = ilimit;

    args->oend = oend;
    args->dt = dt;

    return 0;
}
static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressAsmArgs const* args, int stream, BYTE* segmentEnd)
{
    /* Validate that we haven't overwritten. */
    if (args->op[stream] > segmentEnd)
        return ERROR(corruption_detected);
    /* Validate that we haven't read beyond iend[].
     * Note that ip[] may be < iend[] because the MSB is
     * the next bit to read, and we may have consumed 100%
     * of the stream, so down to iend[i] - 8 is valid.
     */
    if (args->ip[stream] < args->iend[stream] - 8)
        return ERROR(corruption_detected);

    /* Construct the BIT_DStream_t. */
    bit->bitContainer = MEM_readLE64(args->ip[stream]);
    bit->bitsConsumed = ZSTD_countTrailingZeros((size_t)args->bits[stream]);
    bit->start = (const char*)args->iend[0];
    bit->limitPtr = bit->start + sizeof(size_t);
    bit->ptr = (const char*)args->ip[stream];

    return 0;
}
#endif
#ifndef HUF_FORCE_DECOMPRESS_X2

/*-***************************/
/*  single-symbol decoding   */
/*-***************************/
typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1;   /* single-symbol decoding */

/*
 * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at
 * a time.
 */
static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) {
    U64 D4;
    if (MEM_isLittleEndian()) {
        D4 = (symbol << 8) + nbBits;
    } else {
        D4 = symbol + (nbBits << 8);
    }
    D4 *= 0x0001000100010001ULL;
    return D4;
}
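/* Example (little-endian) : symbol 'A' (0x41) with nbBits == 5 gives
 * D4 == 0x4105410541054105, i.e. four {nbBits=5, byte=0x41} entries packed
 * into a single 8-byte store. */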
/*
 * Increases the tableLog to targetTableLog and rescales the stats.
 * If tableLog > targetTableLog this is a no-op.
 * @returns New tableLog
 */
static U32 HUF_rescaleStats(BYTE* huffWeight, U32* rankVal, U32 nbSymbols, U32 tableLog, U32 targetTableLog)
{
    if (tableLog > targetTableLog)
        return tableLog;
    if (tableLog < targetTableLog) {
        U32 const scale = targetTableLog - tableLog;
        U32 s;
        /* Increase the weight for all non-zero probability symbols by scale. */
        for (s = 0; s < nbSymbols; ++s) {
            huffWeight[s] += (BYTE)((huffWeight[s] == 0) ? 0 : scale);
        }
        /* Update rankVal to reflect the new weights.
         * All weights except 0 get moved to weight + scale.
         * Weights [1, scale] are empty.
         */
        for (s = targetTableLog; s > scale; --s) {
            rankVal[s] = rankVal[s - scale];
        }
        for (s = scale; s > 0; --s) {
            rankVal[s] = 0;
        }
    }
    return targetTableLog;
}
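/* Example : rescaling tableLog 10 up to targetTableLog 11 adds scale == 1 to
 * every non-zero weight, so the rankVal counts shift up by one slot and
 * rankVal[1] becomes empty. */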
typedef struct {
    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];
    U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1];
    U32 statsWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
    BYTE symbols[HUF_SYMBOLVALUE_MAX + 1];
    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
} HUF_ReadDTableX1_Workspace;

size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
{
    return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
}

size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2)
{
    U32 tableLog = 0;
    U32 nbSymbols = 0;
    size_t iSize;
    void* const dtPtr = DTable + 1;
    HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
    HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace;

    DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp));
    if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge);

    DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
    /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzers complain ... */

    iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2);
    if (HUF_isError(iSize)) return iSize;

    /* Table header */
    {   DTableDesc dtd = HUF_getDTableDesc(DTable);
        U32 const maxTableLog = dtd.maxTableLog + 1;
        U32 const targetTableLog = MIN(maxTableLog, HUF_DECODER_FAST_TABLELOG);
        tableLog = HUF_rescaleStats(wksp->huffWeight, wksp->rankVal, nbSymbols, tableLog, targetTableLog);
        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, Huffman tree cannot fit in */
        dtd.tableType = 0;
        dtd.tableLog = (BYTE)tableLog;
        ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
    }

    /* Compute symbols and rankStart given rankVal:
     *
     * rankVal already contains the number of values of each weight.
     *
     * symbols contains the symbols ordered by weight. First are the rankVal[0]
     * weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on.
     * symbols[0] is filled (but unused) to avoid a branch.
     *
     * rankStart contains the offset where each rank belongs in the DTable.
     * rankStart[0] is not filled because there are no entries in the table for
     * weight 0.
     */
    {
        int n;
        int nextRankStart = 0;
        int const unroll = 4;
        int const nLimit = (int)nbSymbols - unroll + 1;
        for (n=0; n<(int)tableLog+1; n++) {
            U32 const curr = nextRankStart;
            nextRankStart += wksp->rankVal[n];
            wksp->rankStart[n] = curr;
        }
        for (n=0; n < nLimit; n += unroll) {
            int u;
            for (u=0; u < unroll; ++u) {
                size_t const w = wksp->huffWeight[n+u];
                wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u);
            }
        }
        for (; n < (int)nbSymbols; ++n) {
            size_t const w = wksp->huffWeight[n];
            wksp->symbols[wksp->rankStart[w]++] = (BYTE)n;
        }
    }

    /* fill DTable
     * We fill all entries of each weight in order.
     * That way the length is a constant for each iteration of the outer loop.
     * We can switch based on the length to a different inner loop which is
     * optimized for that particular case.
     */
    {
        U32 w;
        int symbol = wksp->rankVal[0];
        int rankStart = 0;
        for (w=1; w<tableLog+1; ++w) {
            int const symbolCount = wksp->rankVal[w];
            int const length = (1 << w) >> 1;
            int uStart = rankStart;
            BYTE const nbBits = (BYTE)(tableLog + 1 - w);
            int s;
            int u;
            switch (length) {
            case 1:
                for (s=0; s<symbolCount; ++s) {
                    HUF_DEltX1 D;
                    D.byte = wksp->symbols[symbol + s];
                    D.nbBits = nbBits;
                    dt[uStart] = D;
                    uStart += 1;
                }
                break;
            case 2:
                for (s=0; s<symbolCount; ++s) {
                    HUF_DEltX1 D;
                    D.byte = wksp->symbols[symbol + s];
                    D.nbBits = nbBits;
                    dt[uStart+0] = D;
                    dt[uStart+1] = D;
                    uStart += 2;
                }
                break;
            case 4:
                for (s=0; s<symbolCount; ++s) {
                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
                    MEM_write64(dt + uStart, D4);
                    uStart += 4;
                }
                break;
            case 8:
                for (s=0; s<symbolCount; ++s) {
                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
                    MEM_write64(dt + uStart, D4);
                    MEM_write64(dt + uStart + 4, D4);
                    uStart += 8;
                }
                break;
            default:
                for (s=0; s<symbolCount; ++s) {
                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
                    for (u=0; u < length; u += 16) {
                        MEM_write64(dt + uStart + u + 0, D4);
                        MEM_write64(dt + uStart + u + 4, D4);
                        MEM_write64(dt + uStart + u + 8, D4);
                        MEM_write64(dt + uStart + u + 12, D4);
                    }
                    assert(u == length);
                    uStart += length;
                }
                break;
            }
            symbol += symbolCount;
            rankStart += symbolCount * length;
        }
    }
    return iSize;
}

FORCE_INLINE_TEMPLATE BYTE
HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
{
    size_t const val = BIT_lookBitsFast(Dstream, dtLog);    /* note : dtLog >= 1 */
    BYTE const c = dt[val].byte;
    BIT_skipBits(Dstream, dt[val].nbBits);
    return c;
}
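/* X1 lookup : the next dtLog bits index dt[] directly. Short codes simply own
 * many consecutive entries, e.g. with dtLog == 11 a 1-bit code fills 1024
 * entries that all carry the same byte and nbBits == 1. */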
#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
    *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)

#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr)  \
    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)

#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
    if (MEM_64bits()) \
        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
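/* Between reloads, a 64-bit container always has room for 4 max-length codes,
 * so all four steps decode. A 32-bit container only covers two, hence _2 is
 * skipped, and _1 only decodes while HUF_TABLELOG_MAX <= 12 keeps two draws
 * within the reload budget. */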
HINT_INLINE size_t
HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
{
    BYTE* const pStart = p;

    /* up to 4 symbols at a time */
    if ((pEnd - p) > 3) {
        while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
            HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
            HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
            HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
            HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
        }
    } else {
        BIT_reloadDStream(bitDPtr);
    }

    /* [0-3] symbols remaining */
    if (MEM_32bits())
        while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
            HUF_DECODE_SYMBOLX1_0(p, bitDPtr);

    /* no more data to retrieve from bitstream, no need to reload */
    while (p < pEnd)
        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);

    return pEnd-pStart;
}
FORCE_INLINE_TEMPLATE size_t
HUF_decompress1X1_usingDTable_internal_body(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + dstSize;
    const void* dtPtr = DTable + 1;
    const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
    BIT_DStream_t bitD;
    DTableDesc const dtd = HUF_getDTableDesc(DTable);
    U32 const dtLog = dtd.tableLog;

    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );

    HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);

    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);

    return dstSize;
}

FORCE_INLINE_TEMPLATE size_t
HUF_decompress4X1_usingDTable_internal_body(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    /* Check */
    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */

    {   const BYTE* const istart = (const BYTE*) cSrc;
        BYTE* const ostart = (BYTE*) dst;
        BYTE* const oend = ostart + dstSize;
        BYTE* const olimit = oend - 3;
        const void* const dtPtr = DTable + 1;
        const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;

        /* Init */
        BIT_DStream_t bitD1;
        BIT_DStream_t bitD2;
        BIT_DStream_t bitD3;
        BIT_DStream_t bitD4;
        size_t const length1 = MEM_readLE16(istart);
        size_t const length2 = MEM_readLE16(istart+2);
        size_t const length3 = MEM_readLE16(istart+4);
        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
        const BYTE* const istart1 = istart + 6;  /* jumpTable */
        const BYTE* const istart2 = istart1 + length1;
        const BYTE* const istart3 = istart2 + length2;
        const BYTE* const istart4 = istart3 + length3;
        const size_t segmentSize = (dstSize+3) / 4;
        BYTE* const opStart2 = ostart + segmentSize;
        BYTE* const opStart3 = opStart2 + segmentSize;
        BYTE* const opStart4 = opStart3 + segmentSize;
        BYTE* op1 = ostart;
        BYTE* op2 = opStart2;
        BYTE* op3 = opStart3;
        BYTE* op4 = opStart4;
        DTableDesc const dtd = HUF_getDTableDesc(DTable);
        U32 const dtLog = dtd.tableLog;
        U32 endSignal = 1;

        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
        if (opStart4 > oend) return ERROR(corruption_detected);      /* overflow */
        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );

        /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
        if ((size_t)(oend - op4) >= sizeof(size_t)) {
            for ( ; (endSignal) & (op4 < olimit) ; ) {
                HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
                HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
                HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
                HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
                HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
                HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
                HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
                HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
                HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
                HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
                HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
                HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
                HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
                HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
                HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
                HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
                endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
                endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
                endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
                endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
            }
        }

        /* check corruption */
        /* note : should not be necessary : op# advance in lock step, and we control op4.
         *        but curiously, binaries generated by gcc 7.2 & 7.3 with -mbmi2 run faster when >=1 test is present */
        if (op1 > opStart2) return ERROR(corruption_detected);
        if (op2 > opStart3) return ERROR(corruption_detected);
        if (op3 > opStart4) return ERROR(corruption_detected);
        /* note : op4 is already verified within the main loop */

        /* finish bitStreams one by one */
        HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);
        HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);
        HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);
        HUF_decodeStreamX1(op4, &bitD4, oend,     dt, dtLog);

        /* check */
        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
          if (!endCheck) return ERROR(corruption_detected); }

        /* decoded size */
        return dstSize;
    }
}
#if HUF_NEED_BMI2_FUNCTION
static BMI2_TARGET_ATTRIBUTE
size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc,
                    size_t cSrcSize, HUF_DTable const* DTable) {
    return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}
#endif

#if HUF_NEED_DEFAULT_FUNCTION
static
size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
                    size_t cSrcSize, HUF_DTable const* DTable) {
    return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}
#endif

#if ZSTD_ENABLE_ASM_X86_64_BMI2

HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN;

static HUF_ASM_X86_64_BMI2_ATTRS
size_t
HUF_decompress4X1_usingDTable_internal_bmi2_asm(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    void const* dt = DTable + 1;
    const BYTE* const iend = (const BYTE*)cSrc + 6;
    BYTE* const oend = (BYTE*)dst + dstSize;
    HUF_DecompressAsmArgs args;
    {
        size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
        FORWARD_IF_ERROR(ret, "Failed to init asm args");
        if (ret != 0)
            return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
    }

    assert(args.ip[0] >= args.ilimit);
    HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(&args);

    /* Our loop guarantees that ip[] >= ilimit and that we haven't
     * overwritten any op[].
     */
    assert(args.ip[0] >= iend);
    assert(args.ip[1] >= iend);
    assert(args.ip[2] >= iend);
    assert(args.ip[3] >= iend);
    assert(args.op[3] <= oend);
    (void)iend;

    /* finish bit streams one by one. */
    {
        size_t const segmentSize = (dstSize+3) / 4;
        BYTE* segmentEnd = (BYTE*)dst;
        int i;
        for (i = 0; i < 4; ++i) {
            BIT_DStream_t bit;
            if (segmentSize <= (size_t)(oend - segmentEnd))
                segmentEnd += segmentSize;
            else
                segmentEnd = oend;
            FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption");
            /* Decompress and validate that we've produced exactly the expected length. */
            args.op[i] += HUF_decodeStreamX1(args.op[i], &bit, segmentEnd, (HUF_DEltX1 const*)dt, HUF_DECODER_FAST_TABLELOG);
            if (args.op[i] != segmentEnd) return ERROR(corruption_detected);
        }
    }

    /* decoded size */
    return dstSize;
}
#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */
typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
                                               const void *cSrc,
                                               size_t cSrcSize,
                                               const HUF_DTable *DTable);

HUF_DGEN(HUF_decompress1X1_usingDTable_internal)

static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
                    size_t cSrcSize, HUF_DTable const* DTable, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) {
# if ZSTD_ENABLE_ASM_X86_64_BMI2
        return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
# else
        return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
# endif
    }
#else
    (void)bmi2;
#endif

#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
    return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
#else
    return HUF_decompress4X1_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable);
#endif
}

size_t HUF_decompress1X1_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    if (dtd.tableType != 0) return ERROR(GENERIC);
    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
}

size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
                                   const void* cSrc, size_t cSrcSize,
                                   void* workSpace, size_t wkspSize)
{
    const BYTE* ip = (const BYTE*) cSrc;

    size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize; cSrcSize -= hSize;

    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
}

size_t HUF_decompress4X1_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    if (dtd.tableType != 0) return ERROR(GENERIC);
    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
}

static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
                                               const void* cSrc, size_t cSrcSize,
                                               void* workSpace, size_t wkspSize, int bmi2)
{
    const BYTE* ip = (const BYTE*) cSrc;

    size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize; cSrcSize -= hSize;

    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
}

size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
                                   const void* cSrc, size_t cSrcSize,
                                   void* workSpace, size_t wkspSize)
{
    return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
}

#endif /* HUF_FORCE_DECOMPRESS_X2 */
#ifndef HUF_FORCE_DECOMPRESS_X1

/* *************************/
/* double-symbols decoding */
/* *************************/

typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2;  /* double-symbols decoding */
typedef struct { BYTE symbol; } sortedSymbol_t;
typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
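/* rankVal_t keeps one rankValCol_t per possible number of bits already
 * consumed by a first symbol : rankValOrigin[consumed][w] is the DTable start
 * position of weight w at that depth (see how it is filled in
 * HUF_readDTableX2_wksp_bmi2 below). */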
/*
 * Constructs a HUF_DEltX2 in a U32.
 */
static U32 HUF_buildDEltX2U32(U32 symbol, U32 nbBits, U32 baseSeq, int level)
{
    U32 seq;
    DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, sequence) == 0);
    DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, nbBits) == 2);
    DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, length) == 3);
    DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U32));
    if (MEM_isLittleEndian()) {
        seq = level == 1 ? symbol : (baseSeq + (symbol << 8));
        return seq + (nbBits << 16) + ((U32)level << 24);
    } else {
        seq = level == 1 ? (symbol << 8) : ((baseSeq << 8) + symbol);
        return (seq << 16) + (nbBits << 8) + (U32)level;
    }
}

/*
 * Constructs a HUF_DEltX2.
 */
static HUF_DEltX2 HUF_buildDEltX2(U32 symbol, U32 nbBits, U32 baseSeq, int level)
{
    HUF_DEltX2 DElt;
    U32 const val = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level);
    DEBUG_STATIC_ASSERT(sizeof(DElt) == sizeof(val));
    ZSTD_memcpy(&DElt, &val, sizeof(val));
    return DElt;
}

/*
 * Constructs 2 HUF_DEltX2s and packs them into a U64.
 */
static U64 HUF_buildDEltX2U64(U32 symbol, U32 nbBits, U16 baseSeq, int level)
{
    U32 DElt = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level);
    return (U64)DElt + ((U64)DElt << 32);
}
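/* Example (little-endian) : a level-1 entry for symbol 0x41 with nbBits == 6
 * packs as U32 0x01060041, i.e. sequence == 0x0041, nbBits == 6, length == 1,
 * duplicated into both halves of the U64 so two adjacent table slots can be
 * written with a single 8-byte copy. */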
/*
 * Fills the DTable rank with all the symbols from [begin, end) that are each
 * nbBits long.
 *
 * @param DTableRank The start of the rank in the DTable.
 * @param begin The first symbol to fill (inclusive).
 * @param end The last symbol to fill (exclusive).
 * @param nbBits Each symbol is nbBits long.
 * @param tableLog The table log.
 * @param baseSeq If level == 1 { 0 } else { the first level symbol }
 * @param level The level in the table. Must be 1 or 2.
 */
static void HUF_fillDTableX2ForWeight(
    HUF_DEltX2* DTableRank,
    sortedSymbol_t const* begin, sortedSymbol_t const* end,
    U32 nbBits, U32 tableLog,
    U16 baseSeq, int const level)
{
    U32 const length = 1U << ((tableLog - nbBits) & 0x1F /* quiet static-analyzer */);
    const sortedSymbol_t* ptr;
    assert(level >= 1 && level <= 2);
    switch (length) {
    case 1:
        for (ptr = begin; ptr != end; ++ptr) {
            HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level);
            *DTableRank++ = DElt;
        }
        break;
    case 2:
        for (ptr = begin; ptr != end; ++ptr) {
            HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level);
            DTableRank[0] = DElt;
            DTableRank[1] = DElt;
            DTableRank += 2;
        }
        break;
    case 4:
        for (ptr = begin; ptr != end; ++ptr) {
            U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
            ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
            ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
            DTableRank += 4;
        }
        break;
    case 8:
        for (ptr = begin; ptr != end; ++ptr) {
            U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
            ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
            ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
            ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2));
            ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2));
            DTableRank += 8;
        }
        break;
    default:
        for (ptr = begin; ptr != end; ++ptr) {
            U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
            HUF_DEltX2* const DTableRankEnd = DTableRank + length;
            for (; DTableRank != DTableRankEnd; DTableRank += 8) {
                ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
                ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
                ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2));
                ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2));
            }
        }
        break;
    }
}
/* HUF_fillDTableX2Level2() :
 * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32 consumedBits,
                           const U32* rankVal, const int minWeight, const int maxWeight1,
                           const sortedSymbol_t* sortedSymbols, U32 const* rankStart,
                           U32 nbBitsBaseline, U16 baseSeq)
{
    /* Fill skipped values (all positions up to rankVal[minWeight]).
     * These positions only get a single symbol because the combined weight
     * would be too large.
     */
    if (minWeight > 1) {
        U32 const length = 1U << ((targetLog - consumedBits) & 0x1F /* quiet static-analyzer */);
        U64 const DEltX2 = HUF_buildDEltX2U64(baseSeq, consumedBits, /* baseSeq */ 0, /* level */ 1);
        int const skipSize = rankVal[minWeight];
        assert(length > 1);
        assert((U32)skipSize < length);
        switch (length) {
        case 2:
            assert(skipSize == 1);
            ZSTD_memcpy(DTable, &DEltX2, sizeof(DEltX2));
            break;
        case 4:
            assert(skipSize <= 4);
            ZSTD_memcpy(DTable + 0, &DEltX2, sizeof(DEltX2));
            ZSTD_memcpy(DTable + 2, &DEltX2, sizeof(DEltX2));
            break;
        default:
            {
                int i;
                for (i = 0; i < skipSize; i += 8) {
                    ZSTD_memcpy(DTable + i + 0, &DEltX2, sizeof(DEltX2));
                    ZSTD_memcpy(DTable + i + 2, &DEltX2, sizeof(DEltX2));
                    ZSTD_memcpy(DTable + i + 4, &DEltX2, sizeof(DEltX2));
                    ZSTD_memcpy(DTable + i + 6, &DEltX2, sizeof(DEltX2));
                }
            }
        }
    }

    /* Fill each of the second level symbols by weight. */
    {
        int w;
        for (w = minWeight; w < maxWeight1; ++w) {
            int const begin = rankStart[w];
            int const end = rankStart[w+1];
            U32 const nbBits = nbBitsBaseline - w;
            U32 const totalBits = nbBits + consumedBits;
            HUF_fillDTableX2ForWeight(
                DTable + rankVal[w],
                sortedSymbols + begin, sortedSymbols + end,
                totalBits, targetLog,
                baseSeq, /* level */ 2);
        }
    }
}
static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
                           const sortedSymbol_t* sortedList,
                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
                           const U32 nbBitsBaseline)
{
    U32* const rankVal = rankValOrigin[0];
    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
    const U32 minBits = nbBitsBaseline - maxWeight;
    int w;
    int const wEnd = (int)maxWeight + 1;

    /* Fill DTable in order of weight. */
    for (w = 1; w < wEnd; ++w) {
        int const begin = (int)rankStart[w];
        int const end = (int)rankStart[w+1];
        U32 const nbBits = nbBitsBaseline - w;

        if (targetLog-nbBits >= minBits) {
            /* Enough room for a second symbol. */
            int start = rankVal[w];
            U32 const length = 1U << ((targetLog - nbBits) & 0x1F /* quiet static-analyzer */);
            int minWeight = nbBits + scaleLog;
            int s;
            if (minWeight < 1) minWeight = 1;
            /* Fill the DTable for every symbol of weight w.
             * These symbols get at least 1 second symbol.
             */
            for (s = begin; s != end; ++s) {
                HUF_fillDTableX2Level2(
                    DTable + start, targetLog, nbBits,
                    rankValOrigin[nbBits], minWeight, wEnd,
                    sortedList, rankStart,
                    nbBitsBaseline, sortedList[s].symbol);
                start += length;
            }
        } else {
            /* Only a single symbol. */
            HUF_fillDTableX2ForWeight(
                DTable + rankVal[w],
                sortedList + begin, sortedList + end,
                nbBits, targetLog,
                /* baseSeq */ 0, /* level */ 1);
        }
    }
}
typedef struct {
    rankValCol_t rankVal[HUF_TABLELOG_MAX];
    U32 rankStats[HUF_TABLELOG_MAX + 1];
    U32 rankStart0[HUF_TABLELOG_MAX + 3];
    sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
    BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
    U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
} HUF_ReadDTableX2_Workspace;

size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
                       const void* src, size_t srcSize,
                             void* workSpace, size_t wkspSize)
{
    return HUF_readDTableX2_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
}

size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable,
                       const void* src, size_t srcSize,
                             void* workSpace, size_t wkspSize, int bmi2)
{
    U32 tableLog, maxW, nbSymbols;
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    U32 maxTableLog = dtd.maxTableLog;
    size_t iSize;
    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
    U32 *rankStart;

    HUF_ReadDTableX2_Workspace* const wksp = (HUF_ReadDTableX2_Workspace*)workSpace;

    if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC);

    rankStart = wksp->rankStart0 + 1;
    ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats));
    ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0));

    DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
    if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
    /* ZSTD_memset(weightList, 0, sizeof(weightList)); */  /* is not necessary, even though some analyzers complain ... */

    iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), bmi2);
    if (HUF_isError(iSize)) return iSize;

    /* check result */
    if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
    if (tableLog <= HUF_DECODER_FAST_TABLELOG && maxTableLog > HUF_DECODER_FAST_TABLELOG) maxTableLog = HUF_DECODER_FAST_TABLELOG;

    /* find maxWeight */
    for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */

    /* Get start index of each weight */
    {   U32 w, nextRankStart = 0;
        for (w=1; w<maxW+1; w++) {
            U32 curr = nextRankStart;
            nextRankStart += wksp->rankStats[w];
            rankStart[w] = curr;
        }
        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of the sorted list */
        rankStart[maxW+1] = nextRankStart;
    }

    /* sort symbols by weight */
    {   U32 s;
        for (s=0; s<nbSymbols; s++) {
            U32 const w = wksp->weightList[s];
            U32 const r = rankStart[w]++;
            wksp->sortedSymbol[r].symbol = (BYTE)s;
        }
        rankStart[0] = 0;   /* forget 0w symbols; this is the beginning of weight(1) */
    }

    /* Build rankVal */
    {   U32* const rankVal0 = wksp->rankVal[0];
        {   int const rescale = (maxTableLog-tableLog) - 1;   /* tableLog <= maxTableLog */
            U32 nextRankVal = 0;
            U32 w;
            for (w=1; w<maxW+1; w++) {
                U32 curr = nextRankVal;
                nextRankVal += wksp->rankStats[w] << (w+rescale);
                rankVal0[w] = curr;
        }   }
        {   U32 const minBits = tableLog+1 - maxW;
            U32 consumed;
            for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
                U32* const rankValPtr = wksp->rankVal[consumed];
                U32 w;
                for (w = 1; w < maxW+1; w++) {
                    rankValPtr[w] = rankVal0[w] >> consumed;
    }   }   }   }

    HUF_fillDTableX2(dt, maxTableLog,
                   wksp->sortedSymbol,
                   wksp->rankStart0, wksp->rankVal, maxW,
                   tableLog+1);

    dtd.tableLog = (BYTE)maxTableLog;
    dtd.tableType = 1;
    ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
    return iSize;
}
FORCE_INLINE_TEMPLATE U32
HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
{
    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
    ZSTD_memcpy(op, &dt[val].sequence, 2);
    BIT_skipBits(DStream, dt[val].nbBits);
    return dt[val].length;
}

FORCE_INLINE_TEMPLATE U32
HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
{
    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
    ZSTD_memcpy(op, &dt[val].sequence, 1);
    if (dt[val].length==1) {
        BIT_skipBits(DStream, dt[val].nbBits);
    } else {
        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
            BIT_skipBits(DStream, dt[val].nbBits);
            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
                /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
        }
    }
    return 1;
}
#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
    ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)

#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)

#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
    if (MEM_64bits()) \
        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
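/* Same reload budget as the X1 variants above : each X2 step consumes at most
 * dtLog bits even when it emits two bytes, so the 64-bit / HUF_TABLELOG_MAX
 * guards carry over unchanged. */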
HINT_INLINE size_t
HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
                const HUF_DEltX2* const dt, const U32 dtLog)
{
    BYTE* const pStart = p;

    /* up to 8 symbols at a time */
    if ((size_t)(pEnd - p) >= sizeof(bitDPtr->bitContainer)) {
        if (dtLog <= 11 && MEM_64bits()) {
            /* up to 10 symbols at a time */
            while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-9)) {
                HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
                HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
                HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
                HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
                HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
            }
        } else {
            /* up to 8 symbols at a time */
            while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
                HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
                HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
                HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
                HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
            }
        }
    } else {
        BIT_reloadDStream(bitDPtr);
    }

    /* closer to end : up to 2 symbols at a time */
    if ((size_t)(pEnd - p) >= 2) {
        while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
            HUF_DECODE_SYMBOLX2_0(p, bitDPtr);

        while (p <= pEnd-2)
            HUF_DECODE_SYMBOLX2_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
    }

    if (p < pEnd)
        p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);

    return p-pStart;
}
  1071. FORCE_INLINE_TEMPLATE size_t
  1072. HUF_decompress1X2_usingDTable_internal_body(
  1073. void* dst, size_t dstSize,
  1074. const void* cSrc, size_t cSrcSize,
  1075. const HUF_DTable* DTable)
  1076. {
  1077. BIT_DStream_t bitD;
  1078. /* Init */
  1079. CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
  1080. /* decode */
  1081. { BYTE* const ostart = (BYTE*) dst;
  1082. BYTE* const oend = ostart + dstSize;
  1083. const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
  1084. const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
  1085. DTableDesc const dtd = HUF_getDTableDesc(DTable);
  1086. HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
  1087. }
  1088. /* check */
  1089. if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
  1090. /* decoded size */
  1091. return dstSize;
  1092. }
  1093. FORCE_INLINE_TEMPLATE size_t
  1094. HUF_decompress4X2_usingDTable_internal_body(
  1095. void* dst, size_t dstSize,
  1096. const void* cSrc, size_t cSrcSize,
  1097. const HUF_DTable* DTable)
  1098. {
  1099. if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
  1100. { const BYTE* const istart = (const BYTE*) cSrc;
  1101. BYTE* const ostart = (BYTE*) dst;
  1102. BYTE* const oend = ostart + dstSize;
  1103. BYTE* const olimit = oend - (sizeof(size_t)-1);
  1104. const void* const dtPtr = DTable+1;
  1105. const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
  1106. /* Init */
  1107. BIT_DStream_t bitD1;
  1108. BIT_DStream_t bitD2;
  1109. BIT_DStream_t bitD3;
  1110. BIT_DStream_t bitD4;
  1111. size_t const length1 = MEM_readLE16(istart);
  1112. size_t const length2 = MEM_readLE16(istart+2);
  1113. size_t const length3 = MEM_readLE16(istart+4);
  1114. size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
  1115. const BYTE* const istart1 = istart + 6; /* jumpTable */
  1116. const BYTE* const istart2 = istart1 + length1;
  1117. const BYTE* const istart3 = istart2 + length2;
  1118. const BYTE* const istart4 = istart3 + length3;
  1119. size_t const segmentSize = (dstSize+3) / 4;
  1120. BYTE* const opStart2 = ostart + segmentSize;
  1121. BYTE* const opStart3 = opStart2 + segmentSize;
  1122. BYTE* const opStart4 = opStart3 + segmentSize;
  1123. BYTE* op1 = ostart;
  1124. BYTE* op2 = opStart2;
  1125. BYTE* op3 = opStart3;
  1126. BYTE* op4 = opStart4;
  1127. U32 endSignal = 1;
  1128. DTableDesc const dtd = HUF_getDTableDesc(DTable);
  1129. U32 const dtLog = dtd.tableLog;
  1130. if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
  1131. if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
  1132. CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
  1133. CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
  1134. CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
  1135. CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
  1136. /* 16-32 symbols per loop (4-8 symbols per stream) */
  1137. if ((size_t)(oend - op4) >= sizeof(size_t)) {
  1138. for ( ; (endSignal) & (op4 < olimit); ) {
  1139. #if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
  1140. HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
  1141. HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
  1142. HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
  1143. HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
  1144. HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
  1145. HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
  1146. HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
  1147. HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
  1148. endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
  1149. endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
  1150. HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
  1151. HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
  1152. HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
  1153. HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
  1154. HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
  1155. HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
  1156. HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
  1157. HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
  1158. endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
  1159. endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
  1160. #else
  1161. HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
  1162. HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
  1163. HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
  1164. HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
  1165. HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
  1166. HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
  1167. HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
  1168. HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
  1169. HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
  1170. HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
  1171. HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
  1172. HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
  1173. HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
  1174. HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
  1175. HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
  1176. HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
  1177. endSignal = (U32)LIKELY((U32)
  1178. (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)
  1179. & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)
  1180. & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)
  1181. & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));
  1182. #endif
  1183. }
  1184. }
        /* check corruption */
        if (op1 > opStart2) return ERROR(corruption_detected);
        if (op2 > opStart3) return ERROR(corruption_detected);
        if (op3 > opStart4) return ERROR(corruption_detected);
        /* note : op4 already verified within main loop */

        /* finish bitStreams one by one */
        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);

        /* check */
        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
          if (!endCheck) return ERROR(corruption_detected); }
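        /* BIT_endOfDStream() succeeds only if a stream was consumed down
         * to exactly its first bit. An input that produced dstSize bytes
         * but left bits unread (or over-read past the start) is corrupt. */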
        /* decoded size */
        return dstSize;
    }
}
#if HUF_NEED_BMI2_FUNCTION
static BMI2_TARGET_ATTRIBUTE
size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc,
                    size_t cSrcSize, HUF_DTable const* DTable) {
    return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}
#endif

#if HUF_NEED_DEFAULT_FUNCTION
static
size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
                    size_t cSrcSize, HUF_DTable const* DTable) {
    return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}
#endif
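/* The same _body template is compiled twice on dynamic-dispatch builds :
 * once under BMI2_TARGET_ATTRIBUTE, so the compiler may emit BMI2
 * instructions (e.g. shlx/bzhi) in the bit-reading hot path, and once
 * plain, letting the runtime dispatcher below pick whichever variant the
 * host CPU supports. */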
#if ZSTD_ENABLE_ASM_X86_64_BMI2

HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN;

static HUF_ASM_X86_64_BMI2_ATTRS size_t
HUF_decompress4X2_usingDTable_internal_bmi2_asm(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable) {
    void const* dt = DTable + 1;
    const BYTE* const iend = (const BYTE*)cSrc + 6;
    BYTE* const oend = (BYTE*)dst + dstSize;
    HUF_DecompressAsmArgs args;
    {
        size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
        FORWARD_IF_ERROR(ret, "Failed to init asm args");
        if (ret != 0)
            return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
    }
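    /* A non-zero (non-error) return from HUF_DecompressAsmArgs_init()
     * signals that this input does not meet the fast asm loop's
     * preconditions (typically : streams too short), so decoding falls
     * back to the portable bmi2 C loop above. */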
    assert(args.ip[0] >= args.ilimit);
    HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(&args);

    /* note : op4 already verified within main loop */
    assert(args.ip[0] >= iend);
    assert(args.ip[1] >= iend);
    assert(args.ip[2] >= iend);
    assert(args.ip[3] >= iend);
    assert(args.op[3] <= oend);
    (void)iend;
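    /* Postconditions of the asm loop : every input pointer has been
     * consumed backward no further than iend, the end of the 6-byte
     * jump-table header (iend = cSrc + 6 above), and no output pointer
     * has overrun the destination buffer. */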
    /* finish bitStreams one by one */
    {
        size_t const segmentSize = (dstSize+3) / 4;
        BYTE* segmentEnd = (BYTE*)dst;
        int i;
        for (i = 0; i < 4; ++i) {
            BIT_DStream_t bit;
            if (segmentSize <= (size_t)(oend - segmentEnd))
                segmentEnd += segmentSize;
            else
                segmentEnd = oend;
            FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption");
            args.op[i] += HUF_decodeStreamX2(args.op[i], &bit, segmentEnd, (HUF_DEltX2 const*)dt, HUF_DECODER_FAST_TABLELOG);
            if (args.op[i] != segmentEnd)
                return ERROR(corruption_detected);
        }
    }
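    /* The asm loop stops short of the final bytes of each stream; the
     * loop above rebuilds a BIT_DStream_t from the saved asm state with
     * HUF_initRemainingDStream() and finishes each segment with the
     * portable HUF_decodeStreamX2(). Any stream that does not land
     * exactly on its segment boundary marks the input as corrupt. */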
    /* decoded size */
    return dstSize;
}
#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */
static size_t HUF_decompress4X2_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
                    size_t cSrcSize, HUF_DTable const* DTable, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) {
# if ZSTD_ENABLE_ASM_X86_64_BMI2
        return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
# else
        return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
# endif
    }
#else
    (void)bmi2;
#endif

#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
    return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
#else
    return HUF_decompress4X2_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable);
#endif
}
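/* Dispatch order : on DYNAMIC_BMI2 builds, a runtime bmi2 != 0 selects
 * the asm loop when it was assembled in, otherwise the C bmi2 variant.
 * Failing that, a build compiled with BMI2 enabled (__BMI2__) can call
 * the asm loop unconditionally; everything else takes the plain C
 * default. */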
HUF_DGEN(HUF_decompress1X2_usingDTable_internal)

size_t HUF_decompress1X2_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    if (dtd.tableType != 1) return ERROR(GENERIC);
    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
}

size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
                                   const void* cSrc, size_t cSrcSize,
                                   void* workSpace, size_t wkspSize)
{
    const BYTE* ip = (const BYTE*) cSrc;

    size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
                                               workSpace, wkspSize);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize; cSrcSize -= hSize;

    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
}

size_t HUF_decompress4X2_usingDTable(
          void* dst,  size_t dstSize,
    const void* cSrc, size_t cSrcSize,
    const HUF_DTable* DTable)
{
    DTableDesc dtd = HUF_getDTableDesc(DTable);
    if (dtd.tableType != 1) return ERROR(GENERIC);
    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
}

static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
                                               const void* cSrc, size_t cSrcSize,
                                               void* workSpace, size_t wkspSize, int bmi2)
{
    const BYTE* ip = (const BYTE*) cSrc;

    size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
                                         workSpace, wkspSize);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize; cSrcSize -= hSize;

    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
}

size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
                                   const void* cSrc, size_t cSrcSize,
                                   void* workSpace, size_t wkspSize)
{
    return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
}
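/* Typical two-step use of the X2 decoder (illustrative sketch only;
 * dst/dstCapacity/src/srcSize are hypothetical caller buffers, and the
 * HUF_CREATE_STATIC_DTABLEX2 / HUF_WORKSPACE_SIZE / HUF_TABLELOG_MAX
 * helpers are assumed from huf.h) :
 *
 *     HUF_CREATE_STATIC_DTABLEX2(dtable, HUF_TABLELOG_MAX);
 *     U32 workspace[HUF_WORKSPACE_SIZE / sizeof(U32)];
 *     size_t const dSize = HUF_decompress4X2_DCtx_wksp(dtable,
 *                              dst, dstCapacity, src, srcSize,
 *                              workspace, sizeof(workspace));
 *     if (HUF_isError(dSize)) { .. handle error .. }
 *
 * The DTable description is read from the front of src, then the
 * remaining bytes are decoded with the table just built. */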
#endif /* HUF_FORCE_DECOMPRESS_X1 */


/* ***********************************/
/* Universal decompression selectors */
/* ***********************************/
size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    const HUF_DTable* DTable)
{
    DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
    (void)dtd;
    assert(dtd.tableType == 0);
    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
    (void)dtd;
    assert(dtd.tableType == 1);
    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#else
    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#endif
}
size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
                                    const void* cSrc, size_t cSrcSize,
                                    const HUF_DTable* DTable)
{
    DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
    (void)dtd;
    assert(dtd.tableType == 0);
    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
    (void)dtd;
    assert(dtd.tableType == 1);
    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#else
    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#endif
}
#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
static const algo_time_t algoTime[16 /* Quantization */][2 /* single, double */] =
{
    /* single, double */
    {{0,0}, {1,1}},             /* Q==0 : impossible */
    {{0,0}, {1,1}},             /* Q==1 : impossible */
    {{ 150,216}, { 381,119}},   /* Q == 2 : 12-18% */
    {{ 170,205}, { 514,112}},   /* Q == 3 : 18-25% */
    {{ 177,199}, { 539,110}},   /* Q == 4 : 25-32% */
    {{ 197,194}, { 644,107}},   /* Q == 5 : 32-38% */
    {{ 221,192}, { 735,107}},   /* Q == 6 : 38-44% */
    {{ 256,189}, { 881,106}},   /* Q == 7 : 44-50% */
    {{ 359,188}, {1167,109}},   /* Q == 8 : 50-56% */
    {{ 582,187}, {1570,114}},   /* Q == 9 : 56-62% */
    {{ 688,187}, {1712,122}},   /* Q ==10 : 62-69% */
    {{ 825,186}, {1965,136}},   /* Q ==11 : 69-75% */
    {{ 976,185}, {2131,150}},   /* Q ==12 : 75-81% */
    {{1180,186}, {2070,175}},   /* Q ==13 : 81-87% */
    {{1377,185}, {1731,202}},   /* Q ==14 : 87-93% */
    {{1412,185}, {1695,202}},   /* Q ==15 : 93-99% */
};
#endif
/* HUF_selectDecoder() :
 *  Tells which decoder is likely to decode faster,
 *  based on a set of pre-computed metrics.
 * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
 *  Assumption : 0 < dstSize <= 128 KB */
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
{
    assert(dstSize > 0);
    assert(dstSize <= 128*1024);
#if defined(HUF_FORCE_DECOMPRESS_X1)
    (void)dstSize;
    (void)cSrcSize;
    return 0;
#elif defined(HUF_FORCE_DECOMPRESS_X2)
    (void)dstSize;
    (void)cSrcSize;
    return 1;
#else
    /* decoder timing evaluation */
    {   U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 */
        U32 const D256 = (U32)(dstSize >> 8);
        U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
        U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
        DTime1 += DTime1 >> 5;  /* small advantage to algorithm using less memory, to reduce cache eviction */
        return DTime1 < DTime0;
    }
#endif
}
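/* Worked example of the cost model above (purely illustrative sizes) :
 * dstSize = 100 KB, cSrcSize = 60 KB
 *   => Q = 61440*16/102400 = 9, D256 = 102400>>8 = 400
 *   DTime0 =  582 + 187*400 = 75382                  (single-symbol X1)
 *   DTime1 = 1570 + 114*400 = 47170, + (47170>>5) = 48644   (double-symbol X2)
 * 48644 < 75382, so the X2 decoder is selected (return 1). */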
size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
                                     size_t dstSize, const void* cSrc,
                                     size_t cSrcSize, void* workSpace,
                                     size_t wkspSize)
{
    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize == 0) return ERROR(corruption_detected);

    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
#if defined(HUF_FORCE_DECOMPRESS_X1)
        (void)algoNb;
        assert(algoNb == 0);
        return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
        (void)algoNb;
        assert(algoNb == 1);
        return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
#else
        return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                                    cSrcSize, workSpace, wkspSize) :
                        HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                                    cSrcSize, workSpace, wkspSize);
#endif
    }
}
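/* "hufOnly" : unlike HUF_decompress1X_DCtx_wksp() below, this entry point
 * applies no raw-copy or RLE shortcut; the input is expected to be a
 * genuine Huffman-compressed block. */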
size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
                                  const void* cSrc, size_t cSrcSize,
                                  void* workSpace, size_t wkspSize)
{
    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
    if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
    if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */

    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
#if defined(HUF_FORCE_DECOMPRESS_X1)
        (void)algoNb;
        assert(algoNb == 0);
        return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                           cSrcSize, workSpace, wkspSize);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
        (void)algoNb;
        assert(algoNb == 1);
        return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                           cSrcSize, workSpace, wkspSize);
#else
        return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                                    cSrcSize, workSpace, wkspSize) :
                        HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
                                                    cSrcSize, workSpace, wkspSize);
#endif
    }
}
size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
{
    DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
    (void)dtd;
    assert(dtd.tableType == 0);
    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
    (void)dtd;
    assert(dtd.tableType == 1);
    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#else
    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#endif
}
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
{
    const BYTE* ip = (const BYTE*) cSrc;

    size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
    if (HUF_isError(hSize)) return hSize;
    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
    ip += hSize; cSrcSize -= hSize;

    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
}
#endif
size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
{
    DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
    (void)dtd;
    assert(dtd.tableType == 0);
    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
    (void)dtd;
    assert(dtd.tableType == 1);
    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#else
    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
#endif
}
size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
{
    /* validation checks */
    if (dstSize == 0) return ERROR(dstSize_tooSmall);
    if (cSrcSize == 0) return ERROR(corruption_detected);

    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
#if defined(HUF_FORCE_DECOMPRESS_X1)
        (void)algoNb;
        assert(algoNb == 0);
        return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
        (void)algoNb;
        assert(algoNb == 1);
        return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
#else
        return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
                        HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
#endif
    }
}