/root/doris/be/src/exec/decompressor.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include "exec/decompressor.h" |
19 | | |
20 | | #include <strings.h> |
21 | | |
22 | | #include <memory> |
23 | | #include <ostream> |
24 | | |
25 | | #include "common/logging.h" |
26 | | #include "common/status.h" |
27 | | #include "gutil/endian.h" |
28 | | #include "gutil/strings/substitute.h" |
29 | | |
30 | | namespace doris { |
31 | | |
32 | | Status Decompressor::create_decompressor(CompressType type, |
33 | 0 | std::unique_ptr<Decompressor>* decompressor) { |
34 | 0 | switch (type) { |
35 | 0 | case CompressType::UNCOMPRESSED: |
36 | 0 | decompressor->reset(nullptr); |
37 | 0 | break; |
38 | 0 | case CompressType::GZIP: |
39 | 0 | decompressor->reset(new GzipDecompressor(false)); |
40 | 0 | break; |
41 | 0 | case CompressType::DEFLATE: |
42 | 0 | decompressor->reset(new GzipDecompressor(true)); |
43 | 0 | break; |
44 | 0 | case CompressType::BZIP2: |
45 | 0 | decompressor->reset(new Bzip2Decompressor()); |
46 | 0 | break; |
47 | 0 | case CompressType::ZSTD: |
48 | 0 | decompressor->reset(new ZstdDecompressor()); |
49 | 0 | break; |
50 | 0 | case CompressType::LZ4FRAME: |
51 | 0 | decompressor->reset(new Lz4FrameDecompressor()); |
52 | 0 | break; |
53 | 0 | case CompressType::LZ4BLOCK: |
54 | 0 | decompressor->reset(new Lz4BlockDecompressor()); |
55 | 0 | break; |
56 | 0 | case CompressType::SNAPPYBLOCK: |
57 | 0 | decompressor->reset(new SnappyBlockDecompressor()); |
58 | 0 | break; |
59 | 0 | case CompressType::LZOP: |
60 | 0 | decompressor->reset(new LzopDecompressor()); |
61 | 0 | break; |
62 | 0 | default: |
63 | 0 | return Status::InternalError("Unknown compress type: {}", type); |
64 | 0 | } |
65 | | |
66 | 0 | Status st = Status::OK(); |
67 | 0 | if (*decompressor != nullptr) { |
68 | 0 | st = (*decompressor)->init(); |
69 | 0 | } |
70 | |
71 | 0 | return st; |
72 | 0 | } |
73 | | |
74 | | Status Decompressor::create_decompressor(TFileCompressType::type type, |
75 | 0 | std::unique_ptr<Decompressor>* decompressor) { |
76 | 0 | CompressType compress_type; |
77 | 0 | switch (type) { |
78 | 0 | case TFileCompressType::PLAIN: |
79 | 0 | case TFileCompressType::UNKNOWN: |
80 | 0 | compress_type = CompressType::UNCOMPRESSED; |
81 | 0 | break; |
82 | 0 | case TFileCompressType::GZ: |
83 | 0 | compress_type = CompressType::GZIP; |
84 | 0 | break; |
85 | 0 | case TFileCompressType::LZO: |
86 | 0 | case TFileCompressType::LZOP: |
87 | 0 | compress_type = CompressType::LZOP; |
88 | 0 | break; |
89 | 0 | case TFileCompressType::BZ2: |
90 | 0 | compress_type = CompressType::BZIP2; |
91 | 0 | break; |
92 | 0 | case TFileCompressType::ZSTD: |
93 | 0 | compress_type = CompressType::ZSTD; |
94 | 0 | break; |
95 | 0 | case TFileCompressType::LZ4FRAME: |
96 | 0 | compress_type = CompressType::LZ4FRAME; |
97 | 0 | break; |
98 | 0 | case TFileCompressType::LZ4BLOCK: |
99 | 0 | compress_type = CompressType::LZ4BLOCK; |
100 | 0 | break; |
101 | 0 | case TFileCompressType::DEFLATE: |
102 | 0 | compress_type = CompressType::DEFLATE; |
103 | 0 | break; |
104 | 0 | case TFileCompressType::SNAPPYBLOCK: |
105 | 0 | compress_type = CompressType::SNAPPYBLOCK; |
106 | 0 | break; |
107 | 0 | default: |
108 | 0 | return Status::InternalError<false>("unknown compress type: {}", type); |
109 | 0 | } |
110 | 0 | RETURN_IF_ERROR(Decompressor::create_decompressor(compress_type, decompressor)); |
111 | | |
112 | 0 | return Status::OK(); |
113 | 0 | } |
114 | | |
115 | | Status Decompressor::create_decompressor(TFileFormatType::type type, |
116 | 0 | std::unique_ptr<Decompressor>* decompressor) { |
117 | 0 | CompressType compress_type; |
118 | 0 | switch (type) { |
119 | 0 | case TFileFormatType::FORMAT_PROTO: |
120 | 0 | [[fallthrough]]; |
121 | 0 | case TFileFormatType::FORMAT_CSV_PLAIN: |
122 | 0 | compress_type = CompressType::UNCOMPRESSED; |
123 | 0 | break; |
124 | 0 | case TFileFormatType::FORMAT_CSV_GZ: |
125 | 0 | compress_type = CompressType::GZIP; |
126 | 0 | break; |
127 | 0 | case TFileFormatType::FORMAT_CSV_BZ2: |
128 | 0 | compress_type = CompressType::BZIP2; |
129 | 0 | break; |
130 | 0 | case TFileFormatType::FORMAT_CSV_LZ4FRAME: |
131 | 0 | compress_type = CompressType::LZ4FRAME; |
132 | 0 | break; |
133 | 0 | case TFileFormatType::FORMAT_CSV_LZ4BLOCK: |
134 | 0 | compress_type = CompressType::LZ4BLOCK; |
135 | 0 | break; |
136 | 0 | case TFileFormatType::FORMAT_CSV_LZOP: |
137 | 0 | compress_type = CompressType::LZOP; |
138 | 0 | break; |
139 | 0 | case TFileFormatType::FORMAT_CSV_DEFLATE: |
140 | 0 | compress_type = CompressType::DEFLATE; |
141 | 0 | break; |
142 | 0 | case TFileFormatType::FORMAT_CSV_SNAPPYBLOCK: |
143 | 0 | compress_type = CompressType::SNAPPYBLOCK; |
144 | 0 | break; |
145 | 0 | default: |
146 | 0 | return Status::InternalError<false>("unknown compress type: {}", type); |
147 | 0 | } |
148 | 0 | RETURN_IF_ERROR(Decompressor::create_decompressor(compress_type, decompressor)); |
149 | | |
150 | 0 | return Status::OK(); |
151 | 0 | } |
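A minimal caller-side sketch of the factory overloads above (hedged: the chosen compress type and the surrounding reader code are illustrative assumptions; only create_decompressor() and the Decompressor interface come from this file):

    std::unique_ptr<Decompressor> decompressor;
    RETURN_IF_ERROR(Decompressor::create_decompressor(TFileCompressType::GZ, &decompressor));
    // For PLAIN/UNKNOWN the factory resets the pointer to nullptr, so callers must
    // check before use: a null decompressor means the stream is read uncompressed.
    if (decompressor != nullptr) {
        VLOG_TRACE << decompressor->debug_info();
    }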
152 | | |
153 | 0 | uint32_t Decompressor::_read_int32(uint8_t* buf) { |
154 | 0 | return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; |
155 | 0 | } |
156 | | |
157 | 0 | std::string Decompressor::debug_info() { |
158 | 0 | return "Decompressor"; |
159 | 0 | } |
160 | | |
161 | | // Gzip |
162 | | GzipDecompressor::GzipDecompressor(bool is_deflate) |
163 | | : Decompressor(is_deflate ? CompressType::DEFLATE : CompressType::GZIP), |
164 | 0 | _is_deflate(is_deflate) {} |
165 | | |
166 | 0 | GzipDecompressor::~GzipDecompressor() { |
167 | 0 | (void)inflateEnd(&_z_strm); |
168 | 0 | } |
169 | | |
170 | 0 | Status GzipDecompressor::init() { |
171 | 0 | _z_strm = {}; |
172 | 0 | _z_strm.zalloc = Z_NULL; |
173 | 0 | _z_strm.zfree = Z_NULL; |
174 | 0 | _z_strm.opaque = Z_NULL; |
175 | |
176 | 0 | int window_bits = _is_deflate ? WINDOW_BITS : (WINDOW_BITS | DETECT_CODEC); |
177 | 0 | int ret = inflateInit2(&_z_strm, window_bits); |
178 | 0 | if (ret < 0) { |
179 | 0 | return Status::InternalError("Failed to init inflate. status code: {}", ret); |
180 | 0 | } |
181 | | |
182 | 0 | return Status::OK(); |
183 | 0 | } |
184 | | |
185 | | Status GzipDecompressor::decompress(uint8_t* input, size_t input_len, size_t* input_bytes_read, |
186 | | uint8_t* output, size_t output_max_len, |
187 | | size_t* decompressed_len, bool* stream_end, |
188 | 0 | size_t* more_input_bytes, size_t* more_output_bytes) { |
189 | | // 1. set input and output |
190 | 0 | _z_strm.next_in = input; |
191 | 0 | _z_strm.avail_in = input_len; |
192 | 0 | _z_strm.next_out = output; |
193 | 0 | _z_strm.avail_out = output_max_len; |
194 | |
195 | 0 | while (_z_strm.avail_out > 0 && _z_strm.avail_in > 0) { |
196 | 0 | *stream_end = false; |
197 | | // inflate() performs one or both of the following actions: |
198 | | // Decompress more input starting at next_in and update next_in and avail_in |
199 | | // accordingly. |
200 | | // Provide more output starting at next_out and update next_out and avail_out |
201 | | // accordingly. |
202 | | // inflate() returns Z_OK if some progress has been made (more input processed |
203 | | // or more output produced) |
204 | |
205 | 0 | int ret = inflate(&_z_strm, Z_NO_FLUSH); |
206 | 0 | *input_bytes_read = input_len - _z_strm.avail_in; |
207 | 0 | *decompressed_len = output_max_len - _z_strm.avail_out; |
208 | |
209 | 0 | VLOG_TRACE << "gzip dec ret: " << ret << " input_bytes_read: " << *input_bytes_read |
210 | 0 | << " decompressed_len: " << *decompressed_len; |
211 | |
212 | 0 | if (ret == Z_BUF_ERROR) { |
213 | | // Z_BUF_ERROR indicates that inflate() could not consume more input or |
214 | | // produce more output. inflate() can be called again with more output space |
215 | | // or more available input |
216 | | // ATTN: even if ret == Z_OK, decompressed_len may also be zero |
217 | 0 | return Status::OK(); |
218 | 0 | } else if (ret == Z_STREAM_END) { |
219 | 0 | *stream_end = true; |
220 | | // reset _z_strm to continue decoding a subsequent gzip stream |
221 | 0 | ret = inflateReset(&_z_strm); |
222 | 0 | if (ret != Z_OK) { |
223 | 0 | return Status::InternalError("Failed to inflateReset. return code: {}", ret); |
224 | 0 | } |
225 | 0 | } else if (ret != Z_OK) { |
226 | 0 | return Status::InternalError("Failed to inflate. return code: {}", ret); |
227 | 0 | } else { |
228 | | // here ret must be Z_OK: inflate() made progress but is not finished yet, |
229 | | // so we keep looping while both avail_out and avail_in are > 0 |
230 | | // (there is still input to consume and output space to fill). |
231 | 0 | } |
232 | 0 | } |
233 | | |
234 | 0 | return Status::OK(); |
235 | 0 | } |
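A sketch of how a caller typically drives the decompress() contract above: pass a chunk of input and an output buffer, then use the out-parameters to decide whether to refill input or enlarge output before calling again (buffer names and capacities are assumptions for illustration):

    size_t in_read = 0, out_written = 0, more_in = 0, more_out = 0;
    bool stream_end = false;
    RETURN_IF_ERROR(decompressor->decompress(in_buf, in_len, &in_read,
                                             out_buf, out_cap, &out_written,
                                             &stream_end, &more_in, &more_out));
    // in_read bytes of in_buf were consumed and out_written bytes of out_buf were
    // produced; either may be zero (e.g. on Z_BUF_ERROR above), in which case the
    // caller supplies more input or a larger output buffer and calls again.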
236 | | |
237 | 0 | std::string GzipDecompressor::debug_info() { |
238 | 0 | std::stringstream ss; |
239 | 0 | ss << "GzipDecompressor." |
240 | 0 | << " is_deflate: " << _is_deflate; |
241 | 0 | return ss.str(); |
242 | 0 | } |
243 | | |
244 | | // Bzip2 |
245 | 0 | Bzip2Decompressor::~Bzip2Decompressor() { |
246 | 0 | BZ2_bzDecompressEnd(&_bz_strm); |
247 | 0 | } |
248 | | |
249 | 0 | Status Bzip2Decompressor::init() { |
250 | 0 | bzero(&_bz_strm, sizeof(_bz_strm)); |
251 | 0 | int ret = BZ2_bzDecompressInit(&_bz_strm, 0, 0); |
252 | 0 | if (ret != BZ_OK) { |
253 | 0 | return Status::InternalError("Failed to init bz2. status code: {}", ret); |
254 | 0 | } |
255 | | |
256 | 0 | return Status::OK(); |
257 | 0 | } |
258 | | |
259 | | Status Bzip2Decompressor::decompress(uint8_t* input, size_t input_len, size_t* input_bytes_read, |
260 | | uint8_t* output, size_t output_max_len, |
261 | | size_t* decompressed_len, bool* stream_end, |
262 | 0 | size_t* more_input_bytes, size_t* more_output_bytes) { |
263 | | // 1. set input and output |
264 | 0 | _bz_strm.next_in = const_cast<char*>(reinterpret_cast<const char*>(input)); |
265 | 0 | _bz_strm.avail_in = input_len; |
266 | 0 | _bz_strm.next_out = reinterpret_cast<char*>(output); |
267 | 0 | _bz_strm.avail_out = output_max_len; |
268 | |
269 | 0 | while (_bz_strm.avail_out > 0 && _bz_strm.avail_in > 0) { |
270 | 0 | *stream_end = false; |
271 | | // decompress |
272 | 0 | int ret = BZ2_bzDecompress(&_bz_strm); |
273 | 0 | *input_bytes_read = input_len - _bz_strm.avail_in; |
274 | 0 | *decompressed_len = output_max_len - _bz_strm.avail_out; |
275 | |
276 | 0 | if (ret == BZ_DATA_ERROR || ret == BZ_DATA_ERROR_MAGIC) { |
277 | 0 | LOG(INFO) << "input_bytes_read: " << *input_bytes_read |
278 | 0 | << " decompressed_len: " << *decompressed_len; |
279 | 0 | return Status::InternalError("Failed to bz2 decompress. status code: {}", ret); |
280 | 0 | } else if (ret == BZ_STREAM_END) { |
281 | 0 | *stream_end = true; |
282 | 0 | ret = BZ2_bzDecompressEnd(&_bz_strm); |
283 | 0 | if (ret != BZ_OK) { |
284 | 0 | return Status::InternalError( |
285 | 0 | "Failed to end bz2 after meet BZ_STREAM_END. status code: {}", ret); |
286 | 0 | } |
287 | | |
288 | 0 | ret = BZ2_bzDecompressInit(&_bz_strm, 0, 0); |
289 | 0 | if (ret != BZ_OK) { |
290 | 0 | return Status::InternalError( |
291 | 0 | "Failed to init bz2 after meet BZ_STREAM_END. status code: {}", ret); |
292 | 0 | } |
293 | 0 | } else if (ret != BZ_OK) { |
294 | 0 | return Status::InternalError("Failed to bz2 decompress. status code: {}", ret); |
295 | 0 | } else { |
296 | | // continue |
297 | 0 | } |
298 | 0 | } |
299 | | |
300 | 0 | return Status::OK(); |
301 | 0 | } |
302 | | |
303 | 0 | std::string Bzip2Decompressor::debug_info() { |
304 | 0 | std::stringstream ss; |
305 | 0 | ss << "Bzip2Decompressor."; |
306 | 0 | return ss.str(); |
307 | 0 | } |
308 | | |
309 | 0 | ZstdDecompressor::~ZstdDecompressor() { |
310 | 0 | ZSTD_freeDStream(_zstd_strm); |
311 | 0 | } |
312 | | |
313 | 0 | Status ZstdDecompressor::init() { |
314 | 0 | _zstd_strm = ZSTD_createDStream(); |
315 | 0 | if (!_zstd_strm) { |
316 | 0 | std::stringstream ss; |
317 | 0 | return Status::InternalError("ZSTD_dctx creation error"); |
318 | 0 | } |
319 | 0 | auto ret = ZSTD_initDStream(_zstd_strm); |
320 | 0 | if (ZSTD_isError(ret)) { |
321 | 0 | return Status::InternalError("ZSTD_initDStream error: {}", ZSTD_getErrorName(ret)); |
322 | 0 | } |
323 | 0 | return Status::OK(); |
324 | 0 | } |
325 | | |
326 | | Status ZstdDecompressor::decompress(uint8_t* input, size_t input_len, size_t* input_bytes_read, |
327 | | uint8_t* output, size_t output_max_len, |
328 | | size_t* decompressed_len, bool* stream_end, |
329 | 0 | size_t* more_input_bytes, size_t* more_output_bytes) { |
330 | | // 1. set input and output |
331 | 0 | ZSTD_inBuffer inputBuffer = {input, input_len, 0}; |
332 | 0 | ZSTD_outBuffer outputBuffer = {output, output_max_len, 0}; |
333 | | |
334 | | // decompress |
335 | 0 | int ret = ZSTD_decompressStream(_zstd_strm, &outputBuffer, &inputBuffer); |
336 | 0 | *input_bytes_read = inputBuffer.pos; |
337 | 0 | *decompressed_len = outputBuffer.pos; |
338 | |
339 | 0 | if (ZSTD_isError(ret)) { |
340 | 0 | return Status::InternalError("Failed to zstd decompress: {}", ZSTD_getErrorName(ret)); |
341 | 0 | } |
342 | | |
343 | 0 | *stream_end = ret == 0; |
344 | 0 | return Status::OK(); |
345 | 0 | } |
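A standalone sketch of the zstd streaming calls used above, making the *stream_end = (ret == 0) convention explicit (buffers are assumptions; only the libzstd API is relied on):

    ZSTD_DStream* ds = ZSTD_createDStream();
    ZSTD_initDStream(ds);
    ZSTD_inBuffer in = {src, src_len, 0};
    ZSTD_outBuffer out = {dst, dst_cap, 0};
    size_t rc = ZSTD_decompressStream(ds, &out, &in);
    // ZSTD_isError(rc): decoding failed;
    // rc == 0        : a frame was fully decoded and flushed (stream end);
    // rc > 0         : a hint of how many more input bytes zstd expects next.
    ZSTD_freeDStream(ds);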
346 | | |
347 | 0 | std::string ZstdDecompressor::debug_info() { |
348 | 0 | std::stringstream ss; |
349 | 0 | ss << "ZstdDecompressor."; |
350 | 0 | return ss.str(); |
351 | 0 | } |
352 | | |
353 | | // Lz4Frame |
354 | | // Lz4 version: 1.7.5 |
355 | | // define LZ4F_VERSION = 100 |
356 | | const unsigned Lz4FrameDecompressor::DORIS_LZ4F_VERSION = 100; |
357 | | |
358 | 0 | Lz4FrameDecompressor::~Lz4FrameDecompressor() { |
359 | 0 | LZ4F_freeDecompressionContext(_dctx); |
360 | 0 | } |
361 | | |
362 | 0 | Status Lz4FrameDecompressor::init() { |
363 | 0 | size_t ret = LZ4F_createDecompressionContext(&_dctx, DORIS_LZ4F_VERSION); |
364 | 0 | if (LZ4F_isError(ret)) { |
365 | 0 | std::stringstream ss; |
366 | 0 | ss << "LZ4F_dctx creation error: " << std::string(LZ4F_getErrorName(ret)); |
367 | 0 | return Status::InternalError(ss.str()); |
368 | 0 | } |
369 | | |
370 | | // init as -1 |
371 | 0 | _expect_dec_buf_size = -1; |
372 | |
373 | 0 | return Status::OK(); |
374 | 0 | } |
375 | | |
376 | | Status Lz4FrameDecompressor::decompress(uint8_t* input, size_t input_len, size_t* input_bytes_read, |
377 | | uint8_t* output, size_t output_max_len, |
378 | | size_t* decompressed_len, bool* stream_end, |
379 | 0 | size_t* more_input_bytes, size_t* more_output_bytes) { |
380 | 0 | uint8_t* src = input; |
381 | 0 | size_t remaining_input_size = input_len; |
382 | 0 | size_t ret = 1; |
383 | 0 | *input_bytes_read = 0; |
384 | |
385 | 0 | if (_expect_dec_buf_size == -1) { |
386 | | // init the expected decompress buf size, and check if output_max_len is large enough |
387 | | // ATTN: _expect_dec_buf_size is still -1 (unset), which means this is the first call to |
388 | | // decompress(), so *input* should point to the head of the compressed file, |
389 | | // where the lz4 frame header is located. |
390 | |
391 | 0 | if (input_len < 15) { |
392 | 0 | return Status::InternalError( |
393 | 0 | "Lz4 header size is between 7 and 15 bytes. " |
394 | 0 | "but input size is only: {}", |
395 | 0 | input_len); |
396 | 0 | } |
397 | | |
398 | 0 | LZ4F_frameInfo_t info; |
399 | 0 | ret = LZ4F_getFrameInfo(_dctx, &info, (void*)src, &remaining_input_size); |
400 | 0 | if (LZ4F_isError(ret)) { |
401 | 0 | return Status::InternalError("LZ4F_getFrameInfo error: {}", |
402 | 0 | std::string(LZ4F_getErrorName(ret))); |
403 | 0 | } |
404 | | |
405 | 0 | _expect_dec_buf_size = get_block_size(&info); |
406 | 0 | if (_expect_dec_buf_size == -1) { |
407 | 0 | return Status::InternalError( |
408 | 0 | "Impossible lz4 block size unless more block sizes are allowed {}", |
409 | 0 | std::string(LZ4F_getErrorName(ret))); |
410 | 0 | } |
411 | | |
412 | 0 | *input_bytes_read = remaining_input_size; |
413 | |
414 | 0 | src += remaining_input_size; |
415 | 0 | remaining_input_size = input_len - remaining_input_size; |
416 | |
417 | 0 | LOG(INFO) << "lz4 block size: " << _expect_dec_buf_size; |
418 | 0 | } |
419 | | |
420 | | // decompress |
421 | 0 | size_t output_len = output_max_len; |
422 | 0 | ret = LZ4F_decompress(_dctx, (void*)output, &output_len, (void*)src, &remaining_input_size, |
423 | 0 | /* LZ4F_decompressOptions_t */ nullptr); |
424 | 0 | if (LZ4F_isError(ret)) { |
425 | 0 | return Status::InternalError("Decompression error: {}", |
426 | 0 | std::string(LZ4F_getErrorName(ret))); |
427 | 0 | } |
428 | | |
429 | | // update |
430 | 0 | *input_bytes_read += remaining_input_size; |
431 | 0 | *decompressed_len = output_len; |
432 | 0 | if (ret == 0) { |
433 | 0 | *stream_end = true; |
434 | 0 | } else { |
435 | 0 | *stream_end = false; |
436 | 0 | } |
437 | |
438 | 0 | return Status::OK(); |
439 | 0 | } |
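For reference, the LZ4F_getFrameInfo() size convention that the first-call branch above relies on, shown in isolation (variable names are illustrative):

    size_t n = src_len;                                   // in: bytes available at src
    size_t rc = LZ4F_getFrameInfo(dctx, &info, src, &n);  // out: n = header bytes consumed
    // The caller then advances src by n and subtracts n from the remaining input length,
    // exactly as decompress() does with remaining_input_size above.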
440 | | |
441 | 0 | std::string Lz4FrameDecompressor::debug_info() { |
442 | 0 | std::stringstream ss; |
443 | 0 | ss << "Lz4FrameDecompressor." |
444 | 0 | << " expect dec buf size: " << _expect_dec_buf_size |
445 | 0 | << " Lz4 Frame Version: " << DORIS_LZ4F_VERSION; |
446 | 0 | return ss.str(); |
447 | 0 | } |
448 | | |
449 | 0 | size_t Lz4FrameDecompressor::get_block_size(const LZ4F_frameInfo_t* info) { |
450 | 0 | switch (info->blockSizeID) { |
451 | 0 | case LZ4F_default: |
452 | 0 | case LZ4F_max64KB: |
453 | 0 | return 1 << 16; |
454 | 0 | case LZ4F_max256KB: |
455 | 0 | return 1 << 18; |
456 | 0 | case LZ4F_max1MB: |
457 | 0 | return 1 << 20; |
458 | 0 | case LZ4F_max4MB: |
459 | 0 | return 1 << 22; |
460 | 0 | default: |
461 | | // error |
462 | 0 | return -1; |
463 | 0 | } |
464 | 0 | } |
465 | | |
466 | | /// Lz4BlockDecompressor |
467 | 0 | Status Lz4BlockDecompressor::init() { |
468 | 0 | return Status::OK(); |
469 | 0 | } |
470 | | |
471 | | // Hadoop lz4codec source : |
472 | | // https://github.com/apache/hadoop/blob/trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/Lz4Codec.cc |
473 | | // Example: |
474 | | // OriginData (the original data is divided into several large data blocks): |
475 | | // large data block1 | large data block2 | large data block3 | .... |
476 | | // Each large data block is further divided into several small data blocks. |
477 | | // Suppose a large data block is divided into three small blocks: |
478 | | // large data block1: | small block1 | small block2 | small block3 | |
479 | | // CompressData: <A [B1 compress(small block1)] [B2 compress(small block2)] [B3 compress(small block3)]> |
480 | | // |
481 | | // A : original (uncompressed) length of the current large data block. |
482 | | // sizeof(A) = 4 bytes. |
483 | | // A = length(small block1) + length(small block2) + length(small block3) |
484 | | // Bx : compressed length of small data block x. |
485 | | // sizeof(Bx) = 4 bytes. |
486 | | // Bx = length(compress(small blockx)) |
487 | | Status Lz4BlockDecompressor::decompress(uint8_t* input, size_t input_len, size_t* input_bytes_read, |
488 | | uint8_t* output, size_t output_max_len, |
489 | | size_t* decompressed_len, bool* stream_end, |
490 | 0 | size_t* more_input_bytes, size_t* more_output_bytes) { |
491 | 0 | auto* input_ptr = input; |
492 | 0 | auto* output_ptr = output; |
493 | |
494 | 0 | while (input_len > 0) { |
495 | | // if failed, fall back to the beginning of the large block |
496 | 0 | auto* large_block_input_ptr = input_ptr; |
497 | 0 | auto* large_block_output_ptr = output_ptr; |
498 | |
499 | 0 | if (input_len < sizeof(uint32_t)) { |
500 | 0 | return Status::InvalidArgument(strings::Substitute( |
501 | 0 | "fail to do hadoop-lz4 decompress, input_len=$0", input_len)); |
502 | 0 | } |
503 | | |
504 | 0 | uint32_t remaining_decompressed_large_block_len = BigEndian::Load32(input_ptr); |
505 | |
506 | 0 | input_ptr += sizeof(uint32_t); |
507 | 0 | input_len -= sizeof(uint32_t); |
508 | |
509 | 0 | std::size_t remaining_output_len = output_max_len - *decompressed_len; |
510 | |
511 | 0 | if (remaining_output_len < remaining_decompressed_large_block_len) { |
512 | | // Need more output buffer |
513 | 0 | *more_output_bytes = remaining_decompressed_large_block_len - remaining_output_len; |
514 | 0 | input_ptr = large_block_input_ptr; |
515 | 0 | output_ptr = large_block_output_ptr; |
516 | |
517 | 0 | break; |
518 | 0 | } |
519 | | |
520 | 0 | std::size_t decompressed_large_block_len = 0; |
521 | 0 | while (remaining_decompressed_large_block_len > 0) { |
522 | | // Check that at least 4 bytes remain for the next block-length field. |
523 | 0 | if (input_len < sizeof(uint32_t)) { |
524 | 0 | *more_input_bytes = sizeof(uint32_t) - input_len; |
525 | 0 | break; |
526 | 0 | } |
527 | | |
528 | | // Read the length of the next lz4 compressed block. |
529 | 0 | size_t compressed_small_block_len = BigEndian::Load32(input_ptr); |
530 | |
531 | 0 | input_ptr += sizeof(uint32_t); |
532 | 0 | input_len -= sizeof(uint32_t); |
533 | |
534 | 0 | if (compressed_small_block_len == 0) { |
535 | 0 | continue; |
536 | 0 | } |
537 | | |
538 | 0 | if (compressed_small_block_len > input_len) { |
539 | | // Need more input buffer |
540 | 0 | *more_input_bytes = compressed_small_block_len - input_len; |
541 | 0 | break; |
542 | 0 | } |
543 | | |
544 | | // Decompress this block. |
545 | 0 | auto decompressed_small_block_len = LZ4_decompress_safe( |
546 | 0 | reinterpret_cast<const char*>(input_ptr), reinterpret_cast<char*>(output_ptr), |
547 | 0 | compressed_small_block_len, remaining_output_len); |
548 | 0 | if (decompressed_small_block_len < 0) { |
549 | 0 | return Status::InvalidArgument("fail to do LZ4 decompress, error code = {}", |
550 | 0 | decompressed_small_block_len); |
551 | 0 | } |
552 | 0 | input_ptr += compressed_small_block_len; |
553 | 0 | input_len -= compressed_small_block_len; |
554 | |
555 | 0 | output_ptr += decompressed_small_block_len; |
556 | 0 | remaining_decompressed_large_block_len -= decompressed_small_block_len; |
557 | 0 | decompressed_large_block_len += decompressed_small_block_len; |
558 | 0 | }; |
559 | |
560 | 0 | if (*more_input_bytes != 0) { |
561 | | // Need more input buffer |
562 | 0 | input_ptr = large_block_input_ptr; |
563 | 0 | output_ptr = large_block_output_ptr; |
564 | 0 | break; |
565 | 0 | } |
566 | | |
567 | 0 | *decompressed_len += decompressed_large_block_len; |
568 | 0 | } |
569 | 0 | *input_bytes_read += (input_ptr - input); |
570 | | // If no more input or output is needed, this is the end of the compressed block |
571 | 0 | *stream_end = (*more_input_bytes == 0 && *more_output_bytes == 0); |
572 | |
|
573 | 0 | return Status::OK(); |
574 | 0 | } |
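A small sketch of reading the Hadoop framing fields described in the comment above, assuming buf points at the start of a large block (illustrative only; not part of this file):

    uint32_t a  = BigEndian::Load32(buf);                      // A: uncompressed size of the large block
    uint32_t b1 = BigEndian::Load32(buf + sizeof(uint32_t));   // B1: compressed size of the first small block
    const uint8_t* small_block1 = buf + 2 * sizeof(uint32_t);  // first lz4-compressed small block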
575 | | |
576 | 0 | std::string Lz4BlockDecompressor::debug_info() { |
577 | 0 | std::stringstream ss; |
578 | 0 | ss << "Lz4BlockDecompressor."; |
579 | 0 | return ss.str(); |
580 | 0 | } |
581 | | |
582 | | /// SnappyBlockDecompressor |
583 | 0 | Status SnappyBlockDecompressor::init() { |
584 | 0 | return Status::OK(); |
585 | 0 | } |
586 | | |
587 | | // Hadoop snappycodec source : |
588 | | // https://github.com/apache/hadoop/blob/trunk/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/codec/SnappyCodec.cc |
589 | | // Example: |
590 | | // OriginData (the original data is divided into several large data blocks): |
591 | | // large data block1 | large data block2 | large data block3 | .... |
592 | | // Each large data block is further divided into several small data blocks. |
593 | | // Suppose a large data block is divided into three small blocks: |
594 | | // large data block1: | small block1 | small block2 | small block3 | |
595 | | // CompressData: <A [B1 compress(small block1)] [B2 compress(small block2)] [B3 compress(small block3)]> |
596 | | // |
597 | | // A : original (uncompressed) length of the current large data block. |
598 | | // sizeof(A) = 4 bytes. |
599 | | // A = length(small block1) + length(small block2) + length(small block3) |
600 | | // Bx : compressed length of small data block x. |
601 | | // sizeof(Bx) = 4 bytes. |
602 | | // Bx = length(compress(small blockx)) |
603 | | Status SnappyBlockDecompressor::decompress(uint8_t* input, size_t input_len, |
604 | | size_t* input_bytes_read, uint8_t* output, |
605 | | size_t output_max_len, size_t* decompressed_len, |
606 | | bool* stream_end, size_t* more_input_bytes, |
607 | 0 | size_t* more_output_bytes) { |
608 | 0 | auto* input_ptr = input; |
609 | 0 | auto* output_ptr = output; |
610 | |
611 | 0 | while (input_len > 0) { |
612 | | // if failed, fall back to the beginning of the large block |
613 | 0 | auto* large_block_input_ptr = input_ptr; |
614 | 0 | auto* large_block_output_ptr = output_ptr; |
615 | |
616 | 0 | if (input_len < sizeof(uint32_t)) { |
617 | 0 | return Status::InvalidArgument(strings::Substitute( |
618 | 0 | "fail to do hadoop-snappy decompress, input_len=$0", input_len)); |
619 | 0 | } |
620 | | |
621 | 0 | uint32_t remaining_decompressed_large_block_len = BigEndian::Load32(input_ptr); |
622 | |
623 | 0 | input_ptr += sizeof(uint32_t); |
624 | 0 | input_len -= sizeof(uint32_t); |
625 | |
626 | 0 | std::size_t remaining_output_len = output_max_len - *decompressed_len; |
627 | |
628 | 0 | if (remaining_output_len < remaining_decompressed_large_block_len) { |
629 | | // Need more output buffer |
630 | 0 | *more_output_bytes = remaining_decompressed_large_block_len - remaining_output_len; |
631 | 0 | input_ptr = large_block_input_ptr; |
632 | 0 | output_ptr = large_block_output_ptr; |
633 | |
634 | 0 | break; |
635 | 0 | } |
636 | | |
637 | 0 | std::size_t decompressed_large_block_len = 0; |
638 | 0 | while (remaining_decompressed_large_block_len > 0) { |
640 | | // Check that at least 4 bytes remain for the next block-length field. |
640 | 0 | if (input_len < sizeof(uint32_t)) { |
641 | 0 | *more_input_bytes = sizeof(uint32_t) - input_len; |
642 | 0 | break; |
643 | 0 | } |
644 | | |
645 | | // Read the length of the next snappy compressed block. |
646 | 0 | size_t compressed_small_block_len = BigEndian::Load32(input_ptr); |
647 | |
648 | 0 | input_ptr += sizeof(uint32_t); |
649 | 0 | input_len -= sizeof(uint32_t); |
650 | |
651 | 0 | if (compressed_small_block_len == 0) { |
652 | 0 | continue; |
653 | 0 | } |
654 | | |
655 | 0 | if (compressed_small_block_len > input_len) { |
656 | | // Need more input buffer |
657 | 0 | *more_input_bytes = compressed_small_block_len - input_len; |
658 | 0 | break; |
659 | 0 | } |
660 | | |
661 | | // Decompress this block. |
662 | 0 | size_t decompressed_small_block_len; |
663 | 0 | if (!snappy::GetUncompressedLength(reinterpret_cast<const char*>(input_ptr), |
664 | 0 | compressed_small_block_len, |
665 | 0 | &decompressed_small_block_len)) { |
666 | 0 | return Status::InternalError( |
667 | 0 | "snappy block decompress failed to get uncompressed len"); |
668 | 0 | } |
669 | 0 | if (!snappy::RawUncompress(reinterpret_cast<const char*>(input_ptr), |
670 | 0 | compressed_small_block_len, |
671 | 0 | reinterpret_cast<char*>(output_ptr))) { |
672 | 0 | return Status::InternalError( |
673 | 0 | "snappy block decompress failed. uncompressed_len: {}, compressed_len: {}", |
674 | 0 | decompressed_small_block_len, compressed_small_block_len); |
675 | 0 | } |
676 | 0 | input_ptr += compressed_small_block_len; |
677 | 0 | input_len -= compressed_small_block_len; |
678 | |
679 | 0 | output_ptr += decompressed_small_block_len; |
680 | 0 | remaining_decompressed_large_block_len -= decompressed_small_block_len; |
681 | 0 | decompressed_large_block_len += decompressed_small_block_len; |
682 | 0 | }; |
683 | |
684 | 0 | if (*more_input_bytes != 0) { |
685 | | // Need more input buffer |
686 | 0 | input_ptr = large_block_input_ptr; |
687 | 0 | output_ptr = large_block_output_ptr; |
688 | 0 | break; |
689 | 0 | } |
690 | | |
691 | 0 | *decompressed_len += decompressed_large_block_len; |
692 | 0 | } |
693 | 0 | *input_bytes_read += (input_ptr - input); |
694 | | // If no more input or output is needed, this is the end of the compressed block |
695 | 0 | *stream_end = (*more_input_bytes == 0 && *more_output_bytes == 0); |
696 | |
697 | 0 | return Status::OK(); |
698 | 0 | } |
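The snappy calls used above, shown standalone as a hedged sketch (src, src_len, dst, and dst_cap are assumptions for illustration):

    size_t uncompressed_len = 0;
    if (!snappy::GetUncompressedLength(src, src_len, &uncompressed_len) ||
        uncompressed_len > dst_cap) {
        // corrupt header, or dst cannot hold the uncompressed_len bytes RawUncompress will write
    } else if (!snappy::RawUncompress(src, src_len, dst)) {
        // corrupt compressed payload
    }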
699 | | |
700 | 0 | std::string SnappyBlockDecompressor::debug_info() { |
701 | 0 | std::stringstream ss; |
702 | 0 | ss << "SnappyBlockDecompressor."; |
703 | 0 | return ss.str(); |
704 | 0 | } |
705 | | |
706 | | } // namespace doris |