Coverage Report

Created: 2026-04-15 19:34

be/src/format/table/iceberg_reader.cpp
Line | Count | Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "format/table/iceberg_reader.h"
19
20
#include <gen_cpp/Descriptors_types.h>
21
#include <gen_cpp/Metrics_types.h>
22
#include <gen_cpp/PlanNodes_types.h>
23
#include <gen_cpp/parquet_types.h>
24
#include <glog/logging.h>
25
#include <parallel_hashmap/phmap.h>
26
#include <rapidjson/document.h>
27
28
#include <algorithm>
29
#include <cstring>
30
#include <functional>
31
#include <memory>
32
#include <set>
33
34
#include "common/compiler_util.h" // IWYU pragma: keep
35
#include "common/status.h"
36
#include "core/assert_cast.h"
37
#include "core/block/block.h"
38
#include "core/block/column_with_type_and_name.h"
39
#include "core/column/column.h"
40
#include "core/data_type/data_type_factory.hpp"
41
#include "exprs/aggregate/aggregate_function.h"
42
#include "format/format_common.h"
43
#include "format/generic_reader.h"
44
#include "format/orc/vorc_reader.h"
45
#include "format/parquet/schema_desc.h"
46
#include "format/parquet/vparquet_column_chunk_reader.h"
47
#include "format/table/deletion_vector_reader.h"
48
#include "format/table/iceberg/iceberg_orc_nested_column_utils.h"
49
#include "format/table/iceberg/iceberg_parquet_nested_column_utils.h"
50
#include "format/table/iceberg_delete_file_reader_helper.h"
51
#include "format/table/nested_column_access_helper.h"
52
#include "format/table/table_format_reader.h"
53
#include "runtime/runtime_state.h"
54
#include "util/coding.h"
55
56
namespace cctz {
57
class time_zone;
58
} // namespace cctz
59
namespace doris {
60
class RowDescriptor;
61
class SlotDescriptor;
62
class TupleDescriptor;
63
64
namespace io {
65
struct IOContext;
66
} // namespace io
67
class VExprContext;
68
} // namespace doris
69
70
namespace doris {
71
namespace {
72
73
class GroupedDeleteRowsVisitor final : public IcebergPositionDeleteVisitor {
74
public:
75
    using DeleteRows = std::vector<int64_t>;
76
    using DeleteFile = phmap::parallel_flat_hash_map<
77
            std::string, std::unique_ptr<DeleteRows>, std::hash<std::string>, std::equal_to<>,
78
            std::allocator<std::pair<const std::string, std::unique_ptr<DeleteRows>>>, 8,
79
            std::mutex>;
80
81
    explicit GroupedDeleteRowsVisitor(DeleteFile* position_delete)
82
0
            : _position_delete(position_delete) {}
83
84
0
    Status visit(const std::string& file_path, int64_t pos) override {
85
0
        if (_position_delete == nullptr) {
86
0
            return Status::InvalidArgument("position delete map is null");
87
0
        }
88
89
0
        auto iter = _position_delete->find(file_path);
90
0
        DeleteRows* delete_rows = nullptr;
91
0
        if (iter == _position_delete->end()) {
92
0
            delete_rows = new DeleteRows;
93
0
            (*_position_delete)[file_path] = std::unique_ptr<DeleteRows>(delete_rows);
94
0
        } else {
95
0
            delete_rows = iter->second.get();
96
0
        }
97
0
        delete_rows->push_back(pos);
98
0
        return Status::OK();
99
0
    }
100
101
private:
102
    DeleteFile* _position_delete;
103
};
104
105
} // namespace
106
107
const std::string IcebergOrcReader::ICEBERG_ORC_ATTRIBUTE = "iceberg.id";
108
109
bool IcebergTableReader::_is_fully_dictionary_encoded(
110
6
        const tparquet::ColumnMetaData& column_metadata) {
111
12
    const auto is_dictionary_encoding = [](tparquet::Encoding::type encoding) {
112
12
        return encoding == tparquet::Encoding::PLAIN_DICTIONARY ||
113
12
               encoding == tparquet::Encoding::RLE_DICTIONARY;
114
12
    };
115
8
    const auto is_data_page = [](tparquet::PageType::type page_type) {
116
8
        return page_type == tparquet::PageType::DATA_PAGE ||
117
8
               page_type == tparquet::PageType::DATA_PAGE_V2;
118
8
    };
119
6
    const auto is_level_encoding = [](tparquet::Encoding::type encoding) {
120
2
        return encoding == tparquet::Encoding::RLE || encoding == tparquet::Encoding::BIT_PACKED;
121
2
    };
122
123
    // A column chunk may have a dictionary page but still contain plain-encoded data pages.
124
    // Only treat it as dictionary-coded when all data pages are dictionary encoded.
125
6
    if (column_metadata.__isset.encoding_stats) {
126
5
        bool has_data_page_stats = false;
127
8
        for (const tparquet::PageEncodingStats& enc_stat : column_metadata.encoding_stats) {
128
8
            if (is_data_page(enc_stat.page_type) && enc_stat.count > 0) {
129
6
                has_data_page_stats = true;
130
6
                if (!is_dictionary_encoding(enc_stat.encoding)) {
131
2
                    return false;
132
2
                }
133
6
            }
134
8
        }
135
3
        if (has_data_page_stats) {
136
2
            return true;
137
2
        }
138
3
    }
139
140
2
    bool has_dict_encoding = false;
141
2
    bool has_nondict_encoding = false;
142
3
    for (const tparquet::Encoding::type& encoding : column_metadata.encodings) {
143
3
        if (is_dictionary_encoding(encoding)) {
144
1
            has_dict_encoding = true;
145
1
        }
146
147
3
        if (!is_dictionary_encoding(encoding) && !is_level_encoding(encoding)) {
148
2
            has_nondict_encoding = true;
149
2
            break;
150
2
        }
151
3
    }
152
2
    if (!has_dict_encoding || has_nondict_encoding) {
153
2
        return false;
154
2
    }
155
156
0
    return true;
157
2
}
158
159
IcebergTableReader::IcebergTableReader(std::unique_ptr<GenericReader> file_format_reader,
160
                                       RuntimeProfile* profile, RuntimeState* state,
161
                                       const TFileScanRangeParams& params,
162
                                       const TFileRangeDesc& range, ShardedKVCache* kv_cache,
163
                                       io::IOContext* io_ctx, FileMetaCache* meta_cache)
164
16
        : TableFormatReader(std::move(file_format_reader), state, profile, params, range, io_ctx,
165
16
                            meta_cache),
166
16
          _kv_cache(kv_cache) {
167
16
    static const char* iceberg_profile = "IcebergProfile";
168
16
    ADD_TIMER(_profile, iceberg_profile);
169
16
    _iceberg_profile.num_delete_files =
170
16
            ADD_CHILD_COUNTER(_profile, "NumDeleteFiles", TUnit::UNIT, iceberg_profile);
171
16
    _iceberg_profile.num_delete_rows =
172
16
            ADD_CHILD_COUNTER(_profile, "NumDeleteRows", TUnit::UNIT, iceberg_profile);
173
16
    _iceberg_profile.delete_files_read_time =
174
16
            ADD_CHILD_TIMER(_profile, "DeleteFileReadTime", iceberg_profile);
175
16
    _iceberg_profile.delete_rows_sort_time =
176
16
            ADD_CHILD_TIMER(_profile, "DeleteRowsSortTime", iceberg_profile);
177
16
    _iceberg_profile.parse_delete_file_time =
178
16
            ADD_CHILD_TIMER(_profile, "ParseDeleteFileTime", iceberg_profile);
179
16
}
180
181
2
Status IcebergTableReader::get_next_block_inner(Block* block, size_t* read_rows, bool* eof) {
182
2
    RETURN_IF_ERROR(_expand_block_if_need(block));
183
184
2
    RETURN_IF_ERROR(_file_format_reader->get_next_block(block, read_rows, eof));
185
186
2
    if (_equality_delete_impls.size() > 0) {
187
0
        std::unique_ptr<IColumn::Filter> filter =
188
0
                std::make_unique<IColumn::Filter>(block->rows(), 1);
189
0
        for (auto& equality_delete_impl : _equality_delete_impls) {
190
0
            RETURN_IF_ERROR(equality_delete_impl->filter_data_block(
191
0
                    block, _col_name_to_block_idx, _id_to_block_column_name, *filter));
192
0
        }
193
0
        Block::filter_block_internal(block, *filter, block->columns());
194
0
    }
195
196
2
    *read_rows = block->rows();
197
2
    return _shrink_block_if_need(block);
198
2
}
199
200
Status IcebergTableReader::_execute_or_defer_delete_loading(std::function<Status()> loader,
201
0
                                                            size_t num_files) {
202
0
    if (_file_format == Fileformat::PARQUET) {
203
0
        auto* parquet_reader = static_cast<ParquetReader*>(_file_format_reader.get());
204
0
        parquet_reader->set_position_delete_loader(
205
0
                [this, loader = std::move(loader), num_files]() -> Status {
206
0
                    RETURN_IF_ERROR(loader());
207
0
                    COUNTER_UPDATE(_iceberg_profile.num_delete_files, num_files);
208
0
                    return Status::OK();
209
0
                });
210
0
        _has_deferred_delete_files = true;
211
0
    } else {
212
0
        RETURN_IF_ERROR(loader());
213
0
        COUNTER_UPDATE(_iceberg_profile.num_delete_files, num_files);
214
0
    }
215
0
    return Status::OK();
216
0
}
217
218
2
Status IcebergTableReader::init_row_filters() {
219
    // The count value is obtained directly by Doris BE, so we don't need to read the delete files
220
2
    if (_push_down_agg_type == TPushAggOp::type::COUNT && _table_level_row_count > 0) {
221
0
        return Status::OK();
222
0
    }
223
224
2
    const auto& table_desc = _range.table_format_params.iceberg_params;
225
2
    const auto& version = table_desc.format_version;
226
2
    if (version < MIN_SUPPORT_DELETE_FILES_VERSION) {
227
2
        return Status::OK();
228
2
    }
229
230
0
    auto* parquet_reader = dynamic_cast<ParquetReader*>(_file_format_reader.get());
231
0
    auto* orc_reader = dynamic_cast<OrcReader*>(_file_format_reader.get());
232
233
    // Initialize file information for $row_id generation
234
    // Extract from table_desc which contains current file's metadata
235
0
    if (_need_row_id_column) {
236
0
        std::string file_path = table_desc.original_file_path;
237
0
        int32_t partition_spec_id = 0;
238
0
        std::string partition_data_json;
239
0
        if (table_desc.__isset.partition_spec_id) {
240
0
            partition_spec_id = table_desc.partition_spec_id;
241
0
        }
242
0
        if (table_desc.__isset.partition_data_json) {
243
0
            partition_data_json = table_desc.partition_data_json;
244
0
        }
245
246
0
        if (parquet_reader != nullptr) {
247
0
            parquet_reader->set_iceberg_rowid_params(file_path, partition_spec_id,
248
0
                                                     partition_data_json, _row_id_column_position);
249
0
        } else if (orc_reader != nullptr) {
250
0
            orc_reader->set_iceberg_rowid_params(file_path, partition_spec_id, partition_data_json,
251
0
                                                 _row_id_column_position);
252
0
        }
253
0
        LOG(INFO) << "Initialized $row_id generation for file: " << file_path
254
0
                  << ", partition_spec_id: " << partition_spec_id;
255
0
    }
256
257
0
    std::vector<TIcebergDeleteFileDesc> position_delete_files;
258
0
    std::vector<TIcebergDeleteFileDesc> equality_delete_files;
259
0
    std::vector<TIcebergDeleteFileDesc> deletion_vector_files;
260
0
    for (const TIcebergDeleteFileDesc& desc : table_desc.delete_files) {
261
0
        if (desc.content == POSITION_DELETE) {
262
0
            position_delete_files.emplace_back(desc);
263
0
        } else if (desc.content == EQUALITY_DELETE) {
264
0
            equality_delete_files.emplace_back(desc);
265
0
        } else if (desc.content == DELETION_VECTOR) {
266
0
            deletion_vector_files.emplace_back(desc);
267
0
        }
268
0
    }
269
270
    // Equality deletes must be loaded eagerly: they determine which additional
271
    // columns are needed for the data file read schema.
272
0
    if (!equality_delete_files.empty()) {
273
0
        RETURN_IF_ERROR(_process_equality_delete(equality_delete_files));
274
0
        _file_format_reader->set_push_down_agg_type(TPushAggOp::NONE);
275
0
        COUNTER_UPDATE(_iceberg_profile.num_delete_files, equality_delete_files.size());
276
0
    }
277
278
    // Position deletes and deletion vectors are deferred for Parquet: they are
279
    // only loaded when the first row group survives min/max + bloom filter
280
    // filtering. This avoids wasting I/O when all row groups are filtered out.
281
0
    if (!deletion_vector_files.empty()) {
282
0
        if (deletion_vector_files.size() != 1) [[unlikely]] {
283
            /*
284
             * Deletion vectors are a binary representation of deletes for a single data file that is more efficient
285
             * at execution time than position delete files. Unlike equality or position delete files, there can be
286
             * at most one deletion vector for a given data file in a snapshot.
287
             */
288
0
            return Status::DataQualityError("This iceberg data file has multiple DVs.");
289
0
        }
290
0
        auto data_file_path = table_desc.original_file_path;
291
0
        auto dv_file = deletion_vector_files[0];
292
0
        RETURN_IF_ERROR(_execute_or_defer_delete_loading(
293
0
                [this, data_file_path = std::move(data_file_path), dv_file = std::move(dv_file)]()
294
0
                        -> Status { return read_deletion_vector(data_file_path, dv_file); },
295
0
                deletion_vector_files.size()));
296
0
        _file_format_reader->set_push_down_agg_type(TPushAggOp::NONE);
297
        // Readers can safely ignore position delete files if there is a DV for a data file.
298
0
    } else if (!position_delete_files.empty()) {
299
0
        auto data_file_path = table_desc.original_file_path;
300
0
        auto num_files = position_delete_files.size();
301
0
        RETURN_IF_ERROR(_execute_or_defer_delete_loading(
302
0
                [this, data_file_path = std::move(data_file_path),
303
0
                 pos_delete_files = std::move(position_delete_files)]() mutable -> Status {
304
0
                    return _position_delete_base(data_file_path, pos_delete_files);
305
0
                },
306
0
                num_files));
307
0
        _file_format_reader->set_push_down_agg_type(TPushAggOp::NONE);
308
0
    }
309
0
    return Status::OK();
310
0
}
311
312
void IcebergTableReader::_generate_equality_delete_block(
313
        Block* block, const std::vector<std::string>& equality_delete_col_names,
314
0
        const std::vector<DataTypePtr>& equality_delete_col_types) {
315
0
    for (int i = 0; i < equality_delete_col_names.size(); ++i) {
316
0
        DataTypePtr data_type = make_nullable(equality_delete_col_types[i]);
317
0
        MutableColumnPtr data_column = data_type->create_column();
318
0
        block->insert(ColumnWithTypeAndName(std::move(data_column), data_type,
319
0
                                            equality_delete_col_names[i]));
320
0
    }
321
0
}
322
323
2
Status IcebergTableReader::_expand_block_if_need(Block* block) {
324
2
    std::set<std::string> names;
325
2
    auto block_names = block->get_names();
326
2
    names.insert(block_names.begin(), block_names.end());
327
2
    for (auto& col : _expand_columns) {
328
0
        col.column->assume_mutable()->clear();
329
0
        if (names.contains(col.name)) {
330
0
            return Status::InternalError("Wrong expand column '{}'", col.name);
331
0
        }
332
0
        names.insert(col.name);
333
0
        (*_col_name_to_block_idx)[col.name] = static_cast<uint32_t>(block->columns());
334
0
        block->insert(col);
335
0
    }
336
2
    return Status::OK();
337
2
}
338
339
2
Status IcebergTableReader::_shrink_block_if_need(Block* block) {
340
2
    std::set<size_t> positions_to_erase;
341
2
    for (const std::string& expand_col : _expand_col_names) {
342
0
        if (!_col_name_to_block_idx->contains(expand_col)) {
343
0
            return Status::InternalError("Wrong erase column '{}', block: {}", expand_col,
344
0
                                         block->dump_names());
345
0
        }
346
0
        positions_to_erase.emplace((*_col_name_to_block_idx)[expand_col]);
347
0
    }
348
2
    block->erase(positions_to_erase);
349
2
    for (const std::string& expand_col : _expand_col_names) {
350
0
        _col_name_to_block_idx->erase(expand_col);
351
0
    }
352
2
    return Status::OK();
353
2
}
354
355
Status IcebergTableReader::_position_delete_base(
356
0
        const std::string data_file_path, const std::vector<TIcebergDeleteFileDesc>& delete_files) {
357
0
    std::vector<DeleteRows*> delete_rows_array;
358
0
    int64_t num_delete_rows = 0;
359
0
    for (const auto& delete_file : delete_files) {
360
0
        SCOPED_TIMER(_iceberg_profile.delete_files_read_time);
361
0
        Status create_status = Status::OK();
362
0
        auto* delete_file_cache = _kv_cache->get<DeleteFile>(
363
0
                _delet_file_cache_key(delete_file.path), [&]() -> DeleteFile* {
364
0
                    auto* position_delete = new DeleteFile;
365
0
                    create_status = _read_position_delete_file(delete_file, position_delete);
366
367
0
                    if (!create_status) {
368
0
                        return nullptr;
369
0
                    }
370
371
0
                    return position_delete;
372
0
                });
373
0
        if (create_status.is<ErrorCode::END_OF_FILE>()) {
374
0
            continue;
375
0
        } else if (!create_status.ok()) {
376
0
            return create_status;
377
0
        }
378
379
0
        DeleteFile& delete_file_map = *((DeleteFile*)delete_file_cache);
380
0
        auto get_value = [&](const auto& v) {
381
0
            DeleteRows* row_ids = v.second.get();
382
0
            if (!row_ids->empty()) {
383
0
                delete_rows_array.emplace_back(row_ids);
384
0
                num_delete_rows += row_ids->size();
385
0
            }
386
0
        };
387
0
        delete_file_map.if_contains(data_file_path, get_value);
388
0
    }
389
    // Use a KV cache to store the delete rows corresponding to a data file path.
390
    // The Parquet/ORC reader holds a reference (pointer) to this cached entry.
391
    // This allows delete rows to be reused when a single data file is split into
392
    // multiple splits, avoiding excessive memory usage when delete rows are large.
393
0
    if (num_delete_rows > 0) {
394
0
        SCOPED_TIMER(_iceberg_profile.delete_rows_sort_time);
395
0
        _iceberg_delete_rows =
396
0
                _kv_cache->get<DeleteRows>(data_file_path,
397
0
                                           [&]() -> DeleteRows* {
398
0
                                               auto* data_file_position_delete = new DeleteRows;
399
0
                                               _sort_delete_rows(delete_rows_array, num_delete_rows,
400
0
                                                                 *data_file_position_delete);
401
402
0
                                               return data_file_position_delete;
403
0
                                           }
404
405
0
                );
406
0
        set_delete_rows();
407
0
        COUNTER_UPDATE(_iceberg_profile.num_delete_rows, num_delete_rows);
408
0
    }
409
0
    return Status::OK();
410
0
}
411
412
Status IcebergTableReader::_read_position_delete_file(const TIcebergDeleteFileDesc& delete_file,
413
0
                                                      DeleteFile* position_delete) {
414
0
    GroupedDeleteRowsVisitor visitor(position_delete);
415
0
    IcebergDeleteFileReaderOptions options;
416
0
    options.state = _state;
417
0
    options.profile = _profile;
418
0
    options.scan_params = &_params;
419
0
    options.io_ctx = _io_ctx;
420
0
    options.meta_cache = _meta_cache;
421
0
    options.fs_name = &_range.fs_name;
422
0
    options.batch_size = READ_DELETE_FILE_BATCH_SIZE;
423
0
    return read_iceberg_position_delete_file(delete_file, options, &visitor);
424
0
}
425
426
/**
427
 * https://iceberg.apache.org/spec/#position-delete-files
428
 * The rows in the delete file must be sorted by file_path then position to optimize filtering rows while scanning.
429
 * Sorting by file_path allows filter pushdown by file in columnar storage formats.
430
 * Sorting by position allows filtering rows while scanning, to avoid keeping deletes in memory.
431
 */
432
void IcebergTableReader::_sort_delete_rows(
433
        const std::vector<std::vector<int64_t>*>& delete_rows_array, int64_t num_delete_rows,
434
0
        std::vector<int64_t>& result) {
435
0
    if (delete_rows_array.empty()) {
436
0
        return;
437
0
    }
438
0
    if (delete_rows_array.size() == 1) {
439
0
        result.resize(num_delete_rows);
440
0
        memcpy(result.data(), delete_rows_array.front()->data(), sizeof(int64_t) * num_delete_rows);
441
0
        return;
442
0
    }
443
0
    if (delete_rows_array.size() == 2) {
444
0
        result.resize(num_delete_rows);
445
0
        std::merge(delete_rows_array.front()->begin(), delete_rows_array.front()->end(),
446
0
                   delete_rows_array.back()->begin(), delete_rows_array.back()->end(),
447
0
                   result.begin());
448
0
        return;
449
0
    }
450
451
0
    using vec_pair = std::pair<std::vector<int64_t>::iterator, std::vector<int64_t>::iterator>;
452
0
    result.resize(num_delete_rows);
453
0
    auto row_id_iter = result.begin();
454
0
    auto iter_end = result.end();
455
0
    std::vector<vec_pair> rows_array;
456
0
    for (auto* rows : delete_rows_array) {
457
0
        if (!rows->empty()) {
458
0
            rows_array.emplace_back(rows->begin(), rows->end());
459
0
        }
460
0
    }
461
0
    size_t array_size = rows_array.size();
462
0
    while (row_id_iter != iter_end) {
463
0
        int64_t min_index = 0;
464
0
        int64_t min = *rows_array[0].first;
465
0
        for (size_t i = 0; i < array_size; ++i) {
466
0
            if (*rows_array[i].first < min) {
467
0
                min_index = i;
468
0
                min = *rows_array[i].first;
469
0
            }
470
0
        }
471
0
        *row_id_iter++ = min;
472
0
        rows_array[min_index].first++;
473
0
        if (UNLIKELY(rows_array[min_index].first == rows_array[min_index].second)) {
474
0
            rows_array.erase(rows_array.begin() + min_index);
475
0
            array_size--;
476
0
        }
477
0
    }
478
0
}
479
480
Status IcebergParquetReader::init_reader(
481
        const std::vector<std::string>& file_col_names,
482
        std::unordered_map<std::string, uint32_t>* col_name_to_block_idx,
483
        const VExprContextSPtrs& conjuncts,
484
        phmap::flat_hash_map<int, std::vector<std::shared_ptr<ColumnPredicate>>>&
485
                slot_id_to_predicates,
486
        const TupleDescriptor* tuple_descriptor, const RowDescriptor* row_descriptor,
487
        const std::unordered_map<std::string, int>* colname_to_slot_id,
488
        const VExprContextSPtrs* not_single_slot_filter_conjuncts,
489
1
        const std::unordered_map<int, VExprContextSPtrs>* slot_id_to_filter_conjuncts) {
490
1
    _file_format = Fileformat::PARQUET;
491
1
    _col_name_to_block_idx = col_name_to_block_idx;
492
1
    auto* parquet_reader = static_cast<ParquetReader*>(_file_format_reader.get());
493
1
    RETURN_IF_ERROR(parquet_reader->get_file_metadata_schema(&_data_file_field_desc));
494
1
    DCHECK(_data_file_field_desc != nullptr);
495
1
    if (_row_lineage_columns != nullptr) {
496
0
        const auto& table_desc = _range.table_format_params.iceberg_params;
497
0
        _row_lineage_columns->first_row_id =
498
0
                table_desc.__isset.first_row_id ? table_desc.first_row_id : -1;
499
0
        _row_lineage_columns->last_updated_sequence_number =
500
0
                table_desc.__isset.last_updated_sequence_number
501
0
                        ? table_desc.last_updated_sequence_number
502
0
                        : -1;
503
0
        parquet_reader->set_row_lineage_columns(_row_lineage_columns);
504
0
    }
505
506
1
    auto column_id_result = _create_column_ids(_data_file_field_desc, tuple_descriptor);
507
1
    auto& column_ids = column_id_result.column_ids;
508
1
    const auto& filter_column_ids = column_id_result.filter_column_ids;
509
510
1
    RETURN_IF_ERROR(init_row_filters());
511
1
    _all_required_col_names = file_col_names;
512
513
1
    if (!_params.__isset.history_schema_info || _params.history_schema_info.empty()) [[unlikely]] {
514
1
        RETURN_IF_ERROR(BuildTableInfoUtil::by_parquet_name(
515
1
                tuple_descriptor, *_data_file_field_desc, table_info_node_ptr));
516
1
    } else {
517
0
        std::set<std::string> read_col_name_set(file_col_names.begin(), file_col_names.end());
518
519
0
        bool exist_field_id = true;
520
0
        for (int idx = 0; idx < _data_file_field_desc->size(); idx++) {
521
0
            if (_data_file_field_desc->get_column(idx)->field_id == -1) {
522
                // the data file may come from a Hive table migrated to Iceberg, so the field id is missing
523
0
                exist_field_id = false;
524
0
                break;
525
0
            }
526
0
        }
527
0
        const auto& table_schema = _params.history_schema_info.front().root_field;
528
529
0
        table_info_node_ptr = std::make_shared<TableSchemaChangeHelper::StructNode>();
530
0
        if (exist_field_id) {
531
            // field id -> table field, for the columns that need to be read from the data file.
532
0
            std::unordered_map<int, std::shared_ptr<schema::external::TField>> id_to_table_field;
533
0
            for (const auto& table_field : table_schema.fields) {
534
0
                auto field = table_field.field_ptr;
535
0
                DCHECK(field->__isset.name);
536
0
                if (!read_col_name_set.contains(field->name)) {
537
0
                    continue;
538
0
                }
539
0
                id_to_table_field.emplace(field->id, field);
540
0
            }
541
542
0
            for (int idx = 0; idx < _data_file_field_desc->size(); idx++) {
543
0
                const auto& data_file_field = _data_file_field_desc->get_column(idx);
544
0
                auto data_file_column_id = _data_file_field_desc->get_column(idx)->field_id;
545
546
0
                if (id_to_table_field.contains(data_file_column_id)) {
547
0
                    const auto& table_field = id_to_table_field[data_file_column_id];
548
549
0
                    std::shared_ptr<TableSchemaChangeHelper::Node> field_node = nullptr;
550
0
                    RETURN_IF_ERROR(BuildTableInfoUtil::by_parquet_field_id(
551
0
                            *table_field, *data_file_field, exist_field_id, field_node));
552
0
                    table_info_node_ptr->add_children(table_field->name, data_file_field->name,
553
0
                                                      field_node);
554
555
0
                    _id_to_block_column_name.emplace(data_file_column_id, table_field->name);
556
0
                    id_to_table_field.erase(data_file_column_id);
557
0
                } else if (_equality_delete_col_ids.contains(data_file_column_id)) {
558
                    // Columns that need to be read for equality delete.
559
0
                    const static std::string EQ_DELETE_PRE = "__equality_delete_column__";
560
561
                    // Construct table column names that do not collide with the current table schema,
562
                    // since the columns currently being read may have been dropped from the latest
563
                    // table schema or may have undergone a series of schema changes...
564
0
                    std::string table_column_name = EQ_DELETE_PRE + data_file_field->name;
565
0
                    table_info_node_ptr->add_children(
566
0
                            table_column_name, data_file_field->name,
567
0
                            std::make_shared<TableSchemaChangeHelper::ConstNode>());
568
569
0
                    _id_to_block_column_name.emplace(data_file_column_id, table_column_name);
570
0
                    _expand_col_names.emplace_back(table_column_name);
571
0
                    auto expand_data_type = make_nullable(data_file_field->data_type);
572
0
                    _expand_columns.emplace_back(
573
0
                            ColumnWithTypeAndName {expand_data_type->create_column(),
574
0
                                                   expand_data_type, table_column_name});
575
576
0
                    _all_required_col_names.emplace_back(table_column_name);
577
0
                    column_ids.insert(data_file_field->get_column_id());
578
0
                }
579
0
            }
580
0
            for (const auto& [id, table_field] : id_to_table_field) {
581
0
                table_info_node_ptr->add_not_exist_children(table_field->name);
582
0
            }
583
0
        } else {
584
0
            if (!_equality_delete_col_ids.empty()) [[unlikely]] {
585
0
                return Status::InternalError(
586
0
                        "Can not read missing field id data file when have equality delete");
587
0
            }
588
0
            std::map<std::string, size_t> file_column_idx_map;
589
0
            for (size_t idx = 0; idx < _data_file_field_desc->size(); idx++) {
590
0
                file_column_idx_map.emplace(_data_file_field_desc->get_column(idx)->name, idx);
591
0
            }
592
593
0
            for (const auto& table_field : table_schema.fields) {
594
0
                DCHECK(table_field.__isset.field_ptr);
595
0
                DCHECK(table_field.field_ptr->__isset.name);
596
0
                const auto& table_column_name = table_field.field_ptr->name;
597
0
                if (!read_col_name_set.contains(table_column_name)) {
598
0
                    continue;
599
0
                }
600
0
                if (!table_field.field_ptr->__isset.name_mapping ||
601
0
                    table_field.field_ptr->name_mapping.size() == 0) {
602
0
                    return Status::DataQualityError(
603
0
                            "name_mapping must be set when read missing field id data file.");
604
0
                }
605
0
                bool have_mapping = false;
606
0
                for (const auto& mapped_name : table_field.field_ptr->name_mapping) {
607
0
                    if (file_column_idx_map.contains(mapped_name)) {
608
0
                        std::shared_ptr<TableSchemaChangeHelper::Node> field_node = nullptr;
609
0
                        const auto& file_field = _data_file_field_desc->get_column(
610
0
                                file_column_idx_map.at(mapped_name));
611
0
                        RETURN_IF_ERROR(BuildTableInfoUtil::by_parquet_field_id(
612
0
                                *table_field.field_ptr, *file_field, exist_field_id, field_node));
613
0
                        table_info_node_ptr->add_children(table_column_name, file_field->name,
614
0
                                                          field_node);
615
0
                        have_mapping = true;
616
0
                        break;
617
0
                    }
618
0
                }
619
0
                if (!have_mapping) {
620
0
                    table_info_node_ptr->add_not_exist_children(table_column_name);
621
0
                }
622
0
            }
623
0
        }
624
0
    }
625
626
1
    return parquet_reader->init_reader(
627
1
            _all_required_col_names, _col_name_to_block_idx, conjuncts, slot_id_to_predicates,
628
1
            tuple_descriptor, row_descriptor, colname_to_slot_id, not_single_slot_filter_conjuncts,
629
1
            slot_id_to_filter_conjuncts, table_info_node_ptr, true, column_ids, filter_column_ids);
630
1
}
631
632
ColumnIdResult IcebergParquetReader::_create_column_ids(const FieldDescriptor* field_desc,
633
7
                                                        const TupleDescriptor* tuple_descriptor) {
634
    // First, assign column IDs to the field descriptor
635
7
    auto* mutable_field_desc = const_cast<FieldDescriptor*>(field_desc);
636
7
    mutable_field_desc->assign_ids();
637
638
    // map top-level table column iceberg_id -> FieldSchema*
639
7
    std::unordered_map<int, const FieldSchema*> iceberg_id_to_field_schema_map;
640
641
58
    for (int i = 0; i < field_desc->size(); ++i) {
642
51
        auto field_schema = field_desc->get_column(i);
643
51
        if (!field_schema) continue;
644
645
51
        int iceberg_id = field_schema->field_id;
646
51
        iceberg_id_to_field_schema_map[iceberg_id] = field_schema;
647
51
    }
648
649
7
    std::set<uint64_t> column_ids;
650
7
    std::set<uint64_t> filter_column_ids;
651
652
    // helper to process access paths for a given top-level parquet field
653
7
    auto process_access_paths = [](const FieldSchema* parquet_field,
654
7
                                   const std::vector<TColumnAccessPath>& access_paths,
655
14
                                   std::set<uint64_t>& out_ids) {
656
14
        process_nested_access_paths(
657
14
                parquet_field, access_paths, out_ids,
658
14
                [](const FieldSchema* field) { return field->get_column_id(); },
659
14
                [](const FieldSchema* field) { return field->get_max_column_id(); },
660
14
                IcebergParquetNestedColumnUtils::extract_nested_column_ids);
661
14
    };
662
663
15
    for (const auto* slot : tuple_descriptor->slots()) {
664
15
        auto it = iceberg_id_to_field_schema_map.find(slot->col_unique_id());
665
15
        if (it == iceberg_id_to_field_schema_map.end()) {
666
            // Column not found in file (e.g., partition column, added column)
667
0
            continue;
668
0
        }
669
15
        auto field_schema = it->second;
670
671
        // primitive (non-nested) types: direct mapping by name
672
15
        if ((slot->col_type() != TYPE_STRUCT && slot->col_type() != TYPE_ARRAY &&
673
15
             slot->col_type() != TYPE_MAP)) {
674
7
            column_ids.insert(field_schema->column_id);
675
676
7
            if (slot->is_predicate()) {
677
0
                filter_column_ids.insert(field_schema->column_id);
678
0
            }
679
7
            continue;
680
7
        }
681
682
        // complex types:
683
8
        const auto& all_access_paths = slot->all_access_paths();
684
8
        process_access_paths(field_schema, all_access_paths, column_ids);
685
686
8
        const auto& predicate_access_paths = slot->predicate_access_paths();
687
8
        if (!predicate_access_paths.empty()) {
688
6
            process_access_paths(field_schema, predicate_access_paths, filter_column_ids);
689
6
        }
690
8
    }
691
7
    return ColumnIdResult(std::move(column_ids), std::move(filter_column_ids));
692
7
}
693
694
Status IcebergOrcReader::init_reader(
695
        const std::vector<std::string>& file_col_names,
696
        std::unordered_map<std::string, uint32_t>* col_name_to_block_idx,
697
        const VExprContextSPtrs& conjuncts, const TupleDescriptor* tuple_descriptor,
698
        const RowDescriptor* row_descriptor,
699
        const std::unordered_map<std::string, int>* colname_to_slot_id,
700
        const VExprContextSPtrs* not_single_slot_filter_conjuncts,
701
1
        const std::unordered_map<int, VExprContextSPtrs>* slot_id_to_filter_conjuncts) {
702
1
    _file_format = Fileformat::ORC;
703
1
    _col_name_to_block_idx = col_name_to_block_idx;
704
1
    auto* orc_reader = static_cast<OrcReader*>(_file_format_reader.get());
705
1
    RETURN_IF_ERROR(orc_reader->get_file_type(&_data_file_type_desc));
706
1
    std::vector<std::string> data_file_col_names;
707
1
    std::vector<DataTypePtr> data_file_col_types;
708
1
    RETURN_IF_ERROR(orc_reader->get_parsed_schema(&data_file_col_names, &data_file_col_types));
709
1
    if (_row_lineage_columns != nullptr) {
710
0
        const auto& table_desc = _range.table_format_params.iceberg_params;
711
0
        _row_lineage_columns->first_row_id =
712
0
                table_desc.__isset.first_row_id ? table_desc.first_row_id : -1;
713
0
        _row_lineage_columns->last_updated_sequence_number =
714
0
                table_desc.__isset.last_updated_sequence_number
715
0
                        ? table_desc.last_updated_sequence_number
716
0
                        : -1;
717
0
        orc_reader->set_row_lineage_columns(_row_lineage_columns);
718
0
    }
719
720
1
    auto column_id_result = _create_column_ids(_data_file_type_desc, tuple_descriptor);
721
1
    auto& column_ids = column_id_result.column_ids;
722
1
    const auto& filter_column_ids = column_id_result.filter_column_ids;
723
724
1
    RETURN_IF_ERROR(init_row_filters());
725
726
1
    _all_required_col_names = file_col_names;
727
1
    if (!_params.__isset.history_schema_info || _params.history_schema_info.empty()) [[unlikely]] {
728
1
        RETURN_IF_ERROR(BuildTableInfoUtil::by_orc_name(tuple_descriptor, _data_file_type_desc,
729
1
                                                        table_info_node_ptr));
730
1
    } else {
731
0
        std::set<std::string> read_col_name_set(file_col_names.begin(), file_col_names.end());
732
733
0
        bool exist_field_id = true;
734
0
        for (size_t idx = 0; idx < _data_file_type_desc->getSubtypeCount(); idx++) {
735
0
            if (!_data_file_type_desc->getSubtype(idx)->hasAttributeKey(ICEBERG_ORC_ATTRIBUTE)) {
736
0
                exist_field_id = false;
737
0
                break;
738
0
            }
739
0
        }
740
741
0
        const auto& table_schema = _params.history_schema_info.front().root_field;
742
0
        table_info_node_ptr = std::make_shared<TableSchemaChangeHelper::StructNode>();
743
0
        if (exist_field_id) {
744
            // field id -> table field, for the columns that need to be read from the data file.
745
0
            std::unordered_map<int, std::shared_ptr<schema::external::TField>> id_to_table_field;
746
0
            for (const auto& table_field : table_schema.fields) {
747
0
                auto field = table_field.field_ptr;
748
0
                DCHECK(field->__isset.name);
749
0
                if (!read_col_name_set.contains(field->name)) {
750
0
                    continue;
751
0
                }
752
753
0
                id_to_table_field.emplace(field->id, field);
754
0
            }
755
756
0
            for (int idx = 0; idx < _data_file_type_desc->getSubtypeCount(); idx++) {
757
0
                const auto& data_file_field = _data_file_type_desc->getSubtype(idx);
758
0
                auto data_file_column_id =
759
0
                        std::stoi(data_file_field->getAttributeValue(ICEBERG_ORC_ATTRIBUTE));
760
0
                auto const& file_column_name = _data_file_type_desc->getFieldName(idx);
761
762
0
                if (id_to_table_field.contains(data_file_column_id)) {
763
0
                    const auto& table_field = id_to_table_field[data_file_column_id];
764
765
0
                    std::shared_ptr<TableSchemaChangeHelper::Node> field_node = nullptr;
766
0
                    RETURN_IF_ERROR(BuildTableInfoUtil::by_orc_field_id(
767
0
                            *table_field, data_file_field, ICEBERG_ORC_ATTRIBUTE, exist_field_id,
768
0
                            field_node));
769
0
                    table_info_node_ptr->add_children(table_field->name, file_column_name,
770
0
                                                      field_node);
771
772
0
                    _id_to_block_column_name.emplace(data_file_column_id, table_field->name);
773
0
                    id_to_table_field.erase(data_file_column_id);
774
0
                } else if (_equality_delete_col_ids.contains(data_file_column_id)) {
775
                    // Columns that need to be read for equality delete.
776
0
                    const static std::string EQ_DELETE_PRE = "__equality_delete_column__";
777
778
                    // Construct table column names that do not collide with the current table schema,
779
                    // since the columns currently being read may have been dropped from the latest
780
                    // table schema or may have undergone a series of schema changes...
781
0
                    std::string table_column_name = EQ_DELETE_PRE + file_column_name;
782
0
                    table_info_node_ptr->add_children(
783
0
                            table_column_name, file_column_name,
784
0
                            std::make_shared<TableSchemaChangeHelper::ConstNode>());
785
786
0
                    _id_to_block_column_name.emplace(data_file_column_id, table_column_name);
787
0
                    _expand_col_names.emplace_back(table_column_name);
788
789
0
                    auto expand_data_type = make_nullable(data_file_col_types[idx]);
790
0
                    _expand_columns.emplace_back(
791
0
                            ColumnWithTypeAndName {expand_data_type->create_column(),
792
0
                                                   expand_data_type, table_column_name});
793
794
0
                    _all_required_col_names.emplace_back(table_column_name);
795
0
                    column_ids.insert(data_file_field->getColumnId());
796
0
                }
797
0
            }
798
0
            for (const auto& [id, table_field] : id_to_table_field) {
799
0
                table_info_node_ptr->add_not_exist_children(table_field->name);
800
0
            }
801
0
        } else {
802
0
            if (!_equality_delete_col_ids.empty()) [[unlikely]] {
803
0
                return Status::InternalError(
804
0
                        "Can not read missing field id data file when have equality delete");
805
0
            }
806
0
            std::map<std::string, size_t> file_column_idx_map;
807
0
            for (int idx = 0; idx < _data_file_type_desc->getSubtypeCount(); idx++) {
808
0
                auto const& file_column_name = _data_file_type_desc->getFieldName(idx);
809
0
                file_column_idx_map.emplace(file_column_name, idx);
810
0
            }
811
812
0
            for (const auto& table_field : table_schema.fields) {
813
0
                DCHECK(table_field.__isset.field_ptr);
814
0
                DCHECK(table_field.field_ptr->__isset.name);
815
0
                const auto& table_column_name = table_field.field_ptr->name;
816
0
                if (!read_col_name_set.contains(table_column_name)) {
817
0
                    continue;
818
0
                }
819
0
                if (!table_field.field_ptr->__isset.name_mapping ||
820
0
                    table_field.field_ptr->name_mapping.size() == 0) {
821
0
                    return Status::DataQualityError(
822
0
                            "name_mapping must be set when read missing field id data file.");
823
0
                }
824
0
                auto have_mapping = false;
825
0
                for (const auto& mapped_name : table_field.field_ptr->name_mapping) {
826
0
                    if (file_column_idx_map.contains(mapped_name)) {
827
0
                        auto file_column_idx = file_column_idx_map.at(mapped_name);
828
0
                        std::shared_ptr<TableSchemaChangeHelper::Node> field_node = nullptr;
829
0
                        const auto& file_field = _data_file_type_desc->getSubtype(file_column_idx);
830
0
                        RETURN_IF_ERROR(BuildTableInfoUtil::by_orc_field_id(
831
0
                                *table_field.field_ptr, file_field, ICEBERG_ORC_ATTRIBUTE,
832
0
                                exist_field_id, field_node));
833
0
                        table_info_node_ptr->add_children(
834
0
                                table_column_name,
835
0
                                _data_file_type_desc->getFieldName(file_column_idx), field_node);
836
0
                        have_mapping = true;
837
0
                        break;
838
0
                    }
839
0
                }
840
0
                if (!have_mapping) {
841
0
                    table_info_node_ptr->add_not_exist_children(table_column_name);
842
0
                }
843
0
            }
844
0
        }
845
0
    }
846
847
1
    return orc_reader->init_reader(&_all_required_col_names, _col_name_to_block_idx, conjuncts,
848
1
                                   false, tuple_descriptor, row_descriptor,
849
1
                                   not_single_slot_filter_conjuncts, slot_id_to_filter_conjuncts,
850
1
                                   table_info_node_ptr, column_ids, filter_column_ids);
851
1
}
852
853
ColumnIdResult IcebergOrcReader::_create_column_ids(const orc::Type* orc_type,
854
7
                                                    const TupleDescriptor* tuple_descriptor) {
855
    // map top-level table column iceberg_id -> orc::Type*
856
7
    std::unordered_map<int, const orc::Type*> iceberg_id_to_orc_type_map;
857
58
    for (uint64_t i = 0; i < orc_type->getSubtypeCount(); ++i) {
858
51
        auto orc_sub_type = orc_type->getSubtype(i);
859
51
        if (!orc_sub_type) continue;
860
861
51
        if (!orc_sub_type->hasAttributeKey(ICEBERG_ORC_ATTRIBUTE)) {
862
0
            continue;
863
0
        }
864
51
        int iceberg_id = std::stoi(orc_sub_type->getAttributeValue(ICEBERG_ORC_ATTRIBUTE));
865
51
        iceberg_id_to_orc_type_map[iceberg_id] = orc_sub_type;
866
51
    }
867
868
7
    std::set<uint64_t> column_ids;
869
7
    std::set<uint64_t> filter_column_ids;
870
871
    // helper to process access paths for a given top-level orc field
872
7
    auto process_access_paths = [](const orc::Type* orc_field,
873
7
                                   const std::vector<TColumnAccessPath>& access_paths,
874
14
                                   std::set<uint64_t>& out_ids) {
875
14
        process_nested_access_paths(
876
14
                orc_field, access_paths, out_ids,
877
14
                [](const orc::Type* type) { return type->getColumnId(); },
878
14
                [](const orc::Type* type) { return type->getMaximumColumnId(); },
879
14
                IcebergOrcNestedColumnUtils::extract_nested_column_ids);
880
14
    };
881
882
15
    for (const auto* slot : tuple_descriptor->slots()) {
883
15
        auto it = iceberg_id_to_orc_type_map.find(slot->col_unique_id());
884
15
        if (it == iceberg_id_to_orc_type_map.end()) {
885
            // Column not found in file
886
0
            continue;
887
0
        }
888
15
        const orc::Type* orc_field = it->second;
889
890
        // primitive (non-nested) types
891
15
        if ((slot->col_type() != TYPE_STRUCT && slot->col_type() != TYPE_ARRAY &&
892
15
             slot->col_type() != TYPE_MAP)) {
893
7
            column_ids.insert(orc_field->getColumnId());
894
7
            if (slot->is_predicate()) {
895
0
                filter_column_ids.insert(orc_field->getColumnId());
896
0
            }
897
7
            continue;
898
7
        }
899
900
        // complex types
901
8
        const auto& all_access_paths = slot->all_access_paths();
902
8
        process_access_paths(orc_field, all_access_paths, column_ids);
903
904
8
        const auto& predicate_access_paths = slot->predicate_access_paths();
905
8
        if (!predicate_access_paths.empty()) {
906
6
            process_access_paths(orc_field, predicate_access_paths, filter_column_ids);
907
6
        }
908
8
    }
909
910
7
    return ColumnIdResult(std::move(column_ids), std::move(filter_column_ids));
911
7
}
912
913
// Directly read the deletion vector using the `content_offset` and
914
// `content_size_in_bytes` provided by FE in `delete_file_desc`.
915
// These two fields indicate the location of a blob in storage.
916
// Since the current format is `deletion-vector-v1`, which does not
917
// compress any blobs, we can temporarily skip parsing the Puffin footer.
918
Status IcebergTableReader::read_deletion_vector(const std::string& data_file_path,
919
0
                                                const TIcebergDeleteFileDesc& delete_file_desc) {
920
0
    Status create_status = Status::OK();
921
0
    SCOPED_TIMER(_iceberg_profile.delete_files_read_time);
922
0
    _iceberg_delete_rows = _kv_cache->get<DeleteRows>(data_file_path, [&]() -> DeleteRows* {
923
0
        auto* delete_rows = new DeleteRows;
924
925
0
        TFileRangeDesc delete_range;
926
        // must use __set() method to make sure __isset is true
927
0
        delete_range.__set_fs_name(_range.fs_name);
928
0
        delete_range.path = delete_file_desc.path;
929
0
        delete_range.start_offset = delete_file_desc.content_offset;
930
0
        delete_range.size = delete_file_desc.content_size_in_bytes;
931
0
        delete_range.file_size = -1;
932
933
        // We may consider caching the DeletionVectorReader when reading Puffin files,
934
        // where the underlying reader is an `InMemoryFileReader` and a single data file is
935
        // split into multiple splits. However, we need to ensure that the underlying
936
        // reader supports multi-threaded access.
937
0
        DeletionVectorReader dv_reader(_state, _profile, _params, delete_range, _io_ctx);
938
0
        create_status = dv_reader.open();
939
0
        if (!create_status.ok()) [[unlikely]] {
940
0
            return nullptr;
941
0
        }
942
943
0
        size_t buffer_size = delete_range.size;
944
0
        std::vector<char> buf(buffer_size);
945
0
        if (buffer_size < 12) [[unlikely]] {
946
            // Minimum size: 4 bytes length + 4 bytes magic + 4 bytes CRC32
947
0
            create_status = Status::DataQualityError("Deletion vector file size too small: {}",
948
0
                                                     buffer_size);
949
0
            return nullptr;
950
0
        }
951
952
0
        create_status = dv_reader.read_at(delete_range.start_offset, {buf.data(), buffer_size});
953
0
        if (!create_status) [[unlikely]] {
954
0
            return nullptr;
955
0
        }
956
        // The serialized blob contains:
957
        //
958
        // Combined length of the vector and magic bytes stored as 4 bytes, big-endian
959
        // A 4-byte magic sequence, D1 D3 39 64
960
        // The vector itself, serialized as described in the Iceberg spec
961
        // A CRC-32 checksum of the magic bytes and serialized vector as 4 bytes, big-endian
962
963
0
        auto total_length = BigEndian::Load32(buf.data());
964
0
        if (total_length + 8 != buffer_size) [[unlikely]] {
965
0
            create_status = Status::DataQualityError(
966
0
                    "Deletion vector length mismatch, expected: {}, actual: {}", total_length + 8,
967
0
                    buffer_size);
968
0
            return nullptr;
969
0
        }
970
971
0
        constexpr static char MAGIC_NUMBER[] = {'\xD1', '\xD3', '\x39', '\x64'};
972
0
        if (memcmp(buf.data() + sizeof(total_length), MAGIC_NUMBER, 4)) [[unlikely]] {
973
0
            create_status = Status::DataQualityError("Deletion vector magic number mismatch");
974
0
            return nullptr;
975
0
        }
976
977
0
        roaring::Roaring64Map bitmap;
978
0
        SCOPED_TIMER(_iceberg_profile.parse_delete_file_time);
979
0
        try {
980
0
            bitmap = roaring::Roaring64Map::readSafe(buf.data() + 8, buffer_size - 12);
981
0
        } catch (const std::runtime_error& e) {
982
0
            create_status = Status::DataQualityError("Decode roaring bitmap failed, {}", e.what());
983
0
            return nullptr;
984
0
        }
985
        // skip CRC-32 checksum
986
987
0
        delete_rows->reserve(bitmap.cardinality());
988
0
        for (auto it = bitmap.begin(); it != bitmap.end(); it++) {
989
0
            delete_rows->push_back(*it);
990
0
        }
991
0
        COUNTER_UPDATE(_iceberg_profile.num_delete_rows, delete_rows->size());
992
0
        return delete_rows;
993
0
    });
994
995
0
    RETURN_IF_ERROR(create_status);
996
0
    if (!_iceberg_delete_rows->empty()) [[likely]] {
997
0
        set_delete_rows();
998
0
    }
999
0
    return Status::OK();
1000
0
}
1001
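The offset arithmetic in read_deletion_vector above follows directly from the blob layout quoted in its comments. As a worked example (editor's note), assume the serialized roaring bitmap occupies B bytes:

    buffer       = [4-byte length][4-byte magic D1 D3 39 64][B-byte bitmap][4-byte CRC-32]
    length field = 4 + B                        (magic + vector, big-endian)
    buffer_size  = 4 + 4 + B + 4 = length + 8   (hence the `total_length + 8 != buffer_size` check)
    bitmap       = buf.data() + 8, buffer_size - 12 bytes  (skip length and magic, drop the trailing CRC)

The 12-byte minimum in the size check is just the length, magic, and CRC fields with no bitmap bytes (B = 0).
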
1002
// Similar in structure to IcebergOrcReader::_process_equality_delete, but given
1003
// the significant differences in how Parquet and ORC expose attributes/column
1004
// IDs, it is not easy to combine the two.
1005
Status IcebergParquetReader::_process_equality_delete(
1006
0
        const std::vector<TIcebergDeleteFileDesc>& delete_files) {
1007
0
    std::unordered_map<std::string, std::tuple<std::string, const SlotDescriptor*>>
1008
0
            partition_columns;
1009
0
    std::unordered_map<std::string, VExprContextSPtr> missing_columns;
1010
1011
0
    std::map<int, const FieldSchema*> data_file_id_to_field_schema;
1012
0
    for (int idx = 0; idx < _data_file_field_desc->size(); ++idx) {
1013
0
        auto field_schema = _data_file_field_desc->get_column(idx);
1014
0
        if (_data_file_field_desc->get_column(idx)->field_id == -1) {
1015
0
            return Status::DataQualityError("Iceberg equality delete data file missing field id.");
1016
0
        }
1017
0
        data_file_id_to_field_schema[_data_file_field_desc->get_column(idx)->field_id] =
1018
0
                field_schema;
1019
0
    }
1020
1021
0
    for (const auto& delete_file : delete_files) {
1022
0
        TFileRangeDesc delete_desc;
1023
        // must use __set() method to make sure __isset is true
1024
0
        delete_desc.__set_fs_name(_range.fs_name);
1025
0
        delete_desc.path = delete_file.path;
1026
0
        delete_desc.start_offset = 0;
1027
0
        delete_desc.size = -1;
1028
0
        delete_desc.file_size = -1;
1029
1030
0
        if (!delete_file.__isset.field_ids) [[unlikely]] {
1031
0
            return Status::InternalError(
1032
0
                    "missing delete field ids when reading equality delete file");
1033
0
        }
1034
0
        auto& read_column_field_ids = delete_file.field_ids;
1035
0
        std::set<int> read_column_field_ids_set;
1036
0
        for (const auto& field_id : read_column_field_ids) {
1037
0
            read_column_field_ids_set.insert(field_id);
1038
0
            _equality_delete_col_ids.insert(field_id);
1039
0
        }
1040
1041
0
        auto delete_reader = ParquetReader::create_unique(
1042
0
                _profile, _params, delete_desc, READ_DELETE_FILE_BATCH_SIZE,
1043
0
                &_state->timezone_obj(), _io_ctx, _state, _meta_cache);
1044
0
        RETURN_IF_ERROR(delete_reader->init_schema_reader());
1045
1046
        // the columns that need to be read from the equality delete file.
1047
        // (the delete file may have extra columns that don't need to be read)
1048
0
        std::vector<std::string> delete_col_names;
1049
0
        std::vector<DataTypePtr> delete_col_types;
1050
0
        std::vector<int> delete_col_ids;
1051
0
        std::unordered_map<std::string, uint32_t> delete_col_name_to_block_idx;
1052
1053
0
        const FieldDescriptor* delete_field_desc = nullptr;
1054
0
        RETURN_IF_ERROR(delete_reader->get_file_metadata_schema(&delete_field_desc));
1055
0
        DCHECK(delete_field_desc != nullptr);
1056
1057
0
        auto eq_file_node = std::make_shared<TableSchemaChangeHelper::StructNode>();
1058
0
        for (const auto& delete_file_field : delete_field_desc->get_fields_schema()) {
1059
0
            if (delete_file_field.field_id == -1) [[unlikely]] { // missing delete_file_field id
1060
                // equality delete file must have delete_file_field id to match column.
1061
0
                return Status::DataQualityError(
1062
0
                        "missing delete_file_field id when reading equality delete file");
1063
0
            } else if (read_column_field_ids_set.contains(delete_file_field.field_id)) {
1064
                // the column that need to read.
1065
0
                if (delete_file_field.children.size() > 0) [[unlikely]] { // complex column
1066
0
                    return Status::InternalError(
1067
0
                            "can not support read complex column in equality delete file");
1068
0
                } else if (!data_file_id_to_field_schema.contains(delete_file_field.field_id))
1069
0
                        [[unlikely]] {
1070
0
                    return Status::DataQualityError(
1071
0
                            "can not find delete field id in data file schema when reading "
1072
0
                            "equality delete file");
1073
0
                }
1074
0
                auto data_file_field = data_file_id_to_field_schema[delete_file_field.field_id];
1075
0
                if (data_file_field->data_type->get_primitive_type() !=
1076
0
                    delete_file_field.data_type->get_primitive_type()) [[unlikely]] {
1077
0
                    return Status::NotSupported(
1078
0
                            "Not Support type change in equality delete, field: {}, delete "
1079
0
                            "file type: {}, data file type: {}",
1080
0
                            delete_file_field.field_id, delete_file_field.data_type->get_name(),
1081
0
                            data_file_field->data_type->get_name());
1082
0
                }
1083
1084
0
                std::string filed_lower_name = to_lower(delete_file_field.name);
1085
0
                eq_file_node->add_children(filed_lower_name, delete_file_field.name,
1086
0
                                           std::make_shared<TableSchemaChangeHelper::ScalarNode>());
1087
1088
0
                delete_col_ids.emplace_back(delete_file_field.field_id);
1089
0
                delete_col_names.emplace_back(filed_lower_name);
1090
0
                delete_col_types.emplace_back(make_nullable(delete_file_field.data_type));
1091
1092
0
                read_column_field_ids_set.erase(delete_file_field.field_id);
1093
0
            } else {
1094
                // delete file may be have extra columns that don't need to read
1095
0
            }
1096
0
        }
1097
0
        if (!read_column_field_ids_set.empty()) [[unlikely]] {
1098
0
            return Status::DataQualityError("some field ids not found in equality delete file.");
1099
0
        }
1100
1101
0
        for (uint32_t idx = 0; idx < delete_col_names.size(); ++idx) {
1102
0
            delete_col_name_to_block_idx[delete_col_names[idx]] = idx;
1103
0
        }
1104
0
        phmap::flat_hash_map<int, std::vector<std::shared_ptr<ColumnPredicate>>> tmp;
1105
0
        RETURN_IF_ERROR(delete_reader->init_reader(delete_col_names, &delete_col_name_to_block_idx,
1106
0
                                                   {}, tmp, nullptr, nullptr, nullptr, nullptr,
1107
0
                                                   nullptr, eq_file_node, false));
1108
0
        RETURN_IF_ERROR(delete_reader->set_fill_columns(partition_columns, missing_columns));
1109
1110
0
        if (!_equality_delete_block_map.contains(delete_col_ids)) {
1111
0
            _equality_delete_block_map.emplace(delete_col_ids, _equality_delete_blocks.size());
1112
0
            Block block;
1113
0
            _generate_equality_delete_block(&block, delete_col_names, delete_col_types);
1114
0
            _equality_delete_blocks.emplace_back(block);
1115
0
        }
1116
0
        Block& eq_file_block = _equality_delete_blocks[_equality_delete_block_map[delete_col_ids]];
1117
0
        bool eof = false;
1118
0
        while (!eof) {
1119
0
            Block tmp_block;
1120
0
            _generate_equality_delete_block(&tmp_block, delete_col_names, delete_col_types);
1121
0
            size_t read_rows = 0;
1122
0
            RETURN_IF_ERROR(delete_reader->get_next_block(&tmp_block, &read_rows, &eof));
1123
0
            if (read_rows > 0) {
1124
0
                MutableBlock mutable_block(&eq_file_block);
1125
0
                RETURN_IF_ERROR(mutable_block.merge(tmp_block));
1126
0
            }
1127
0
        }
1128
0
    }
1129
1130
0
    for (const auto& [delete_col_ids, block_idx] : _equality_delete_block_map) {
1131
0
        auto& eq_file_block = _equality_delete_blocks[block_idx];
1132
0
        auto equality_delete_impl =
1133
0
                EqualityDeleteBase::get_delete_impl(&eq_file_block, delete_col_ids);
1134
0
        RETURN_IF_ERROR(equality_delete_impl->init(_profile));
1135
0
        _equality_delete_impls.emplace_back(std::move(equality_delete_impl));
1136
0
    }
1137
0
    return Status::OK();
1138
0
}
1139
1140
Status IcebergOrcReader::_process_equality_delete(
1141
0
        const std::vector<TIcebergDeleteFileDesc>& delete_files) {
1142
0
    std::unordered_map<std::string, std::tuple<std::string, const SlotDescriptor*>>
1143
0
            partition_columns;
1144
0
    std::unordered_map<std::string, VExprContextSPtr> missing_columns;
1145
1146
0
    std::map<int, int> data_file_id_to_field_idx;
1147
0
    for (int idx = 0; idx < _data_file_type_desc->getSubtypeCount(); ++idx) {
1148
0
        if (!_data_file_type_desc->getSubtype(idx)->hasAttributeKey(ICEBERG_ORC_ATTRIBUTE)) {
1149
0
            return Status::DataQualityError("Iceberg equality delete data file missing field id.");
1150
0
        }
1151
0
        auto field_id = std::stoi(
1152
0
                _data_file_type_desc->getSubtype(idx)->getAttributeValue(ICEBERG_ORC_ATTRIBUTE));
1153
0
        data_file_id_to_field_idx[field_id] = idx;
1154
0
    }
1155
1156
0
    for (const auto& delete_file : delete_files) {
1157
0
        TFileRangeDesc delete_desc;
1158
        // must use __set() method to make sure __isset is true
1159
0
        delete_desc.__set_fs_name(_range.fs_name);
1160
0
        delete_desc.path = delete_file.path;
1161
0
        delete_desc.start_offset = 0;
1162
0
        delete_desc.size = -1;
1163
0
        delete_desc.file_size = -1;
1164
1165
0
        if (!delete_file.__isset.field_ids) [[unlikely]] {
1166
0
            return Status::InternalError(
1167
0
                    "missing delete field ids when reading equality delete file");
1168
0
        }
1169
0
        auto& read_column_field_ids = delete_file.field_ids;
1170
0
        std::set<int> read_column_field_ids_set;
1171
0
        for (const auto& field_id : read_column_field_ids) {
1172
0
            read_column_field_ids_set.insert(field_id);
1173
0
            _equality_delete_col_ids.insert(field_id);
1174
0
        }
1175
1176
0
        auto delete_reader = OrcReader::create_unique(_profile, _state, _params, delete_desc,
1177
0
                                                      READ_DELETE_FILE_BATCH_SIZE,
1178
0
                                                      _state->timezone(), _io_ctx, _meta_cache);
1179
0
        RETURN_IF_ERROR(delete_reader->init_schema_reader());
1180
        // delete file schema
1181
0
        std::vector<std::string> delete_file_col_names;
1182
0
        std::vector<DataTypePtr> delete_file_col_types;
1183
0
        RETURN_IF_ERROR(
1184
0
                delete_reader->get_parsed_schema(&delete_file_col_names, &delete_file_col_types));
1185
1186
        // the column that to read equality delete file.
1187
        // (delete file maybe have extra columns that don't need to read)
1188
0
        std::vector<std::string> delete_col_names;
1189
0
        std::vector<DataTypePtr> delete_col_types;
1190
0
        std::vector<int> delete_col_ids;
1191
0
        std::unordered_map<std::string, uint32_t> delete_col_name_to_block_idx;
1192
1193
0
        const orc::Type* delete_field_desc = nullptr;
1194
0
        RETURN_IF_ERROR(delete_reader->get_file_type(&delete_field_desc));
1195
0
        DCHECK(delete_field_desc != nullptr);
1196
1197
0
        auto eq_file_node = std::make_shared<TableSchemaChangeHelper::StructNode>();
1198
1199
0
        for (size_t idx = 0; idx < delete_field_desc->getSubtypeCount(); idx++) {
1200
0
            auto delete_file_field = delete_field_desc->getSubtype(idx);
1201
1202
0
            if (!delete_file_field->hasAttributeKey(ICEBERG_ORC_ATTRIBUTE))
1203
0
                    [[unlikely]] { // missing delete_file_field id
1204
                // equality delete file must have delete_file_field id to match column.
1205
0
                return Status::DataQualityError(
1206
0
                        "missing delete_file_field id when reading equality delete file");
1207
0
            } else {
1208
0
                auto delete_field_id =
1209
0
                        std::stoi(delete_file_field->getAttributeValue(ICEBERG_ORC_ATTRIBUTE));
1210
0
                if (read_column_field_ids_set.contains(delete_field_id)) {
1211
                    // the column that need to read.
1212
0
                    if (is_complex_type(delete_file_col_types[idx]->get_primitive_type()))
1213
0
                            [[unlikely]] {
1214
0
                        return Status::InternalError(
1215
0
                                "can not support read complex column in equality delete file.");
1216
0
                    } else if (!data_file_id_to_field_idx.contains(delete_field_id)) [[unlikely]] {
1217
0
                        return Status::DataQualityError(
1218
0
                                "can not find delete field id in data file schema when reading "
1219
0
                                "equality delete file");
1220
0
                    }
1221
1222
0
                    auto data_file_field = _data_file_type_desc->getSubtype(
1223
0
                            data_file_id_to_field_idx[delete_field_id]);
1224
1225
0
                    if (delete_file_field->getKind() != data_file_field->getKind()) [[unlikely]] {
1226
0
                        return Status::NotSupported(
1227
0
                                "Not Support type change in equality delete, field: {}, delete "
1228
0
                                "file type: {}, data file type: {}",
1229
0
                                delete_field_id, delete_file_field->getKind(),
1230
0
                                data_file_field->getKind());
1231
0
                    }
1232
0
                    std::string filed_lower_name = to_lower(delete_field_desc->getFieldName(idx));
1233
0
                    eq_file_node->add_children(
1234
0
                            filed_lower_name, delete_field_desc->getFieldName(idx),
1235
0
                            std::make_shared<TableSchemaChangeHelper::ScalarNode>());
1236
1237
0
                    delete_col_ids.emplace_back(delete_field_id);
1238
0
                    delete_col_names.emplace_back(filed_lower_name);
1239
0
                    delete_col_types.emplace_back(make_nullable(delete_file_col_types[idx]));
1240
0
                    read_column_field_ids_set.erase(delete_field_id);
1241
0
                }
1242
0
            }
1243
0
        }
1244
0
        if (!read_column_field_ids_set.empty()) [[unlikely]] {
1245
0
            return Status::DataQualityError("some field ids not found in equality delete file.");
1246
0
        }
1247
1248
0
        for (uint32_t idx = 0; idx < delete_col_names.size(); ++idx) {
1249
0
            delete_col_name_to_block_idx[delete_col_names[idx]] = idx;
1250
0
        }
1251
1252
0
        RETURN_IF_ERROR(delete_reader->init_reader(&delete_col_names, &delete_col_name_to_block_idx,
1253
0
                                                   {}, false, nullptr, nullptr, nullptr, nullptr,
1254
0
                                                   eq_file_node));
1255
0
        RETURN_IF_ERROR(delete_reader->set_fill_columns(partition_columns, missing_columns));
1256
1257
0
        if (!_equality_delete_block_map.contains(delete_col_ids)) {
1258
0
            _equality_delete_block_map.emplace(delete_col_ids, _equality_delete_blocks.size());
1259
0
            Block block;
1260
0
            _generate_equality_delete_block(&block, delete_col_names, delete_col_types);
1261
0
            _equality_delete_blocks.emplace_back(block);
1262
0
        }
1263
0
        Block& eq_file_block = _equality_delete_blocks[_equality_delete_block_map[delete_col_ids]];
1264
0
        bool eof = false;
1265
0
        while (!eof) {
1266
0
            Block tmp_block;
1267
0
            _generate_equality_delete_block(&tmp_block, delete_col_names, delete_col_types);
1268
0
            size_t read_rows = 0;
1269
0
            RETURN_IF_ERROR(delete_reader->get_next_block(&tmp_block, &read_rows, &eof));
1270
0
            if (read_rows > 0) {
1271
0
                MutableBlock mutable_block(&eq_file_block);
1272
0
                RETURN_IF_ERROR(mutable_block.merge(tmp_block));
1273
0
            }
1274
0
        }
1275
0
    }
1276
1277
0
    for (const auto& [delete_col_ids, block_idx] : _equality_delete_block_map) {
1278
0
        auto& eq_file_block = _equality_delete_blocks[block_idx];
1279
0
        auto equality_delete_impl =
1280
0
                EqualityDeleteBase::get_delete_impl(&eq_file_block, delete_col_ids);
1281
0
        RETURN_IF_ERROR(equality_delete_impl->init(_profile));
1282
0
        _equality_delete_impls.emplace_back(std::move(equality_delete_impl));
1283
0
    }
1284
0
    return Status::OK();
1285
0
}
1286
} // namespace doris