Coverage Report

Created: 2026-03-30 11:06

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/format/table/iceberg_reader.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "format/table/iceberg_reader.h"
19
20
#include <gen_cpp/Descriptors_types.h>
21
#include <gen_cpp/Metrics_types.h>
22
#include <gen_cpp/PlanNodes_types.h>
23
#include <gen_cpp/parquet_types.h>
24
#include <glog/logging.h>
25
#include <parallel_hashmap/phmap.h>
26
#include <rapidjson/document.h>
27
28
#include <algorithm>
29
#include <cstring>
30
#include <functional>
31
#include <memory>
32
#include <set>
33
34
#include "common/compiler_util.h" // IWYU pragma: keep
35
#include "common/status.h"
36
#include "core/assert_cast.h"
37
#include "core/block/block.h"
38
#include "core/block/column_with_type_and_name.h"
39
#include "core/column/column.h"
40
#include "core/data_type/data_type_factory.hpp"
41
#include "exprs/aggregate/aggregate_function.h"
42
#include "format/format_common.h"
43
#include "format/generic_reader.h"
44
#include "format/orc/vorc_reader.h"
45
#include "format/parquet/schema_desc.h"
46
#include "format/parquet/vparquet_column_chunk_reader.h"
47
#include "format/table/deletion_vector_reader.h"
48
#include "format/table/iceberg/iceberg_orc_nested_column_utils.h"
49
#include "format/table/iceberg/iceberg_parquet_nested_column_utils.h"
50
#include "format/table/iceberg_delete_file_reader_helper.h"
51
#include "format/table/nested_column_access_helper.h"
52
#include "format/table/table_format_reader.h"
53
#include "runtime/runtime_state.h"
54
#include "util/coding.h"
55
56
namespace cctz {
57
#include "common/compile_check_begin.h"
58
class time_zone;
59
} // namespace cctz
60
namespace doris {
61
class RowDescriptor;
62
class SlotDescriptor;
63
class TupleDescriptor;
64
65
namespace io {
66
struct IOContext;
67
} // namespace io
68
class VExprContext;
69
} // namespace doris
70
71
namespace doris {
72
namespace {
73
74
class GroupedDeleteRowsVisitor final : public IcebergPositionDeleteVisitor {
75
public:
76
    using DeleteRows = std::vector<int64_t>;
77
    using DeleteFile = phmap::parallel_flat_hash_map<
78
            std::string, std::unique_ptr<DeleteRows>, std::hash<std::string>, std::equal_to<>,
79
            std::allocator<std::pair<const std::string, std::unique_ptr<DeleteRows>>>, 8,
80
            std::mutex>;
81
82
    explicit GroupedDeleteRowsVisitor(DeleteFile* position_delete)
83
0
            : _position_delete(position_delete) {}
84
85
0
    Status visit(const std::string& file_path, int64_t pos) override {
86
0
        if (_position_delete == nullptr) {
87
0
            return Status::InvalidArgument("position delete map is null");
88
0
        }
89
90
0
        auto iter = _position_delete->find(file_path);
91
0
        DeleteRows* delete_rows = nullptr;
92
0
        if (iter == _position_delete->end()) {
93
0
            delete_rows = new DeleteRows;
94
0
            (*_position_delete)[file_path] = std::unique_ptr<DeleteRows>(delete_rows);
95
0
        } else {
96
0
            delete_rows = iter->second.get();
97
0
        }
98
0
        delete_rows->push_back(pos);
99
0
        return Status::OK();
100
0
    }
101
102
private:
103
    DeleteFile* _position_delete;
104
};
105
106
} // namespace
107
108
const std::string IcebergOrcReader::ICEBERG_ORC_ATTRIBUTE = "iceberg.id";
109
110
IcebergTableReader::IcebergTableReader(std::unique_ptr<GenericReader> file_format_reader,
111
                                       RuntimeProfile* profile, RuntimeState* state,
112
                                       const TFileScanRangeParams& params,
113
                                       const TFileRangeDesc& range, ShardedKVCache* kv_cache,
114
                                       io::IOContext* io_ctx, FileMetaCache* meta_cache)
115
16
        : TableFormatReader(std::move(file_format_reader), state, profile, params, range, io_ctx,
116
16
                            meta_cache),
117
16
          _kv_cache(kv_cache) {
118
16
    static const char* iceberg_profile = "IcebergProfile";
119
16
    ADD_TIMER(_profile, iceberg_profile);
120
16
    _iceberg_profile.num_delete_files =
121
16
            ADD_CHILD_COUNTER(_profile, "NumDeleteFiles", TUnit::UNIT, iceberg_profile);
122
16
    _iceberg_profile.num_delete_rows =
123
16
            ADD_CHILD_COUNTER(_profile, "NumDeleteRows", TUnit::UNIT, iceberg_profile);
124
16
    _iceberg_profile.delete_files_read_time =
125
16
            ADD_CHILD_TIMER(_profile, "DeleteFileReadTime", iceberg_profile);
126
16
    _iceberg_profile.delete_rows_sort_time =
127
16
            ADD_CHILD_TIMER(_profile, "DeleteRowsSortTime", iceberg_profile);
128
16
    _iceberg_profile.parse_delete_file_time =
129
16
            ADD_CHILD_TIMER(_profile, "ParseDeleteFileTime", iceberg_profile);
130
16
}
131
132
2
Status IcebergTableReader::get_next_block_inner(Block* block, size_t* read_rows, bool* eof) {
133
2
    RETURN_IF_ERROR(_expand_block_if_need(block));
134
135
2
    RETURN_IF_ERROR(_file_format_reader->get_next_block(block, read_rows, eof));
136
137
2
    if (_equality_delete_impls.size() > 0) {
138
0
        std::unique_ptr<IColumn::Filter> filter =
139
0
                std::make_unique<IColumn::Filter>(block->rows(), 1);
140
0
        for (auto& equality_delete_impl : _equality_delete_impls) {
141
0
            RETURN_IF_ERROR(equality_delete_impl->filter_data_block(
142
0
                    block, _col_name_to_block_idx, _id_to_block_column_name, *filter));
143
0
        }
144
0
        Block::filter_block_internal(block, *filter, block->columns());
145
0
    }
146
147
2
    *read_rows = block->rows();
148
2
    return _shrink_block_if_need(block);
149
2
}
150
151
2
Status IcebergTableReader::init_row_filters() {
152
    // We get the count value by doris's be, so we don't need to read the delete file
153
2
    if (_push_down_agg_type == TPushAggOp::type::COUNT && _table_level_row_count > 0) {
154
0
        return Status::OK();
155
0
    }
156
157
2
    const auto& table_desc = _range.table_format_params.iceberg_params;
158
2
    const auto& version = table_desc.format_version;
159
2
    if (version < MIN_SUPPORT_DELETE_FILES_VERSION) {
160
2
        return Status::OK();
161
2
    }
162
163
0
    auto* parquet_reader = dynamic_cast<ParquetReader*>(_file_format_reader.get());
164
0
    auto* orc_reader = dynamic_cast<OrcReader*>(_file_format_reader.get());
165
166
    // Initialize file information for $row_id generation
167
    // Extract from table_desc which contains current file's metadata
168
0
    if (_need_row_id_column) {
169
0
        std::string file_path = table_desc.original_file_path;
170
0
        int32_t partition_spec_id = 0;
171
0
        std::string partition_data_json;
172
0
        if (table_desc.__isset.partition_spec_id) {
173
0
            partition_spec_id = table_desc.partition_spec_id;
174
0
        }
175
0
        if (table_desc.__isset.partition_data_json) {
176
0
            partition_data_json = table_desc.partition_data_json;
177
0
        }
178
179
0
        if (parquet_reader != nullptr) {
180
0
            parquet_reader->set_iceberg_rowid_params(file_path, partition_spec_id,
181
0
                                                     partition_data_json, _row_id_column_position);
182
0
        } else if (orc_reader != nullptr) {
183
0
            orc_reader->set_iceberg_rowid_params(file_path, partition_spec_id, partition_data_json,
184
0
                                                 _row_id_column_position);
185
0
        }
186
0
        LOG(INFO) << "Initialized $row_id generation for file: " << file_path
187
0
                  << ", partition_spec_id: " << partition_spec_id;
188
0
    }
189
190
0
    std::vector<TIcebergDeleteFileDesc> position_delete_files;
191
0
    std::vector<TIcebergDeleteFileDesc> equality_delete_files;
192
0
    std::vector<TIcebergDeleteFileDesc> deletion_vector_files;
193
0
    for (const TIcebergDeleteFileDesc& desc : table_desc.delete_files) {
194
0
        if (desc.content == POSITION_DELETE) {
195
0
            position_delete_files.emplace_back(desc);
196
0
        } else if (desc.content == EQUALITY_DELETE) {
197
0
            equality_delete_files.emplace_back(desc);
198
0
        } else if (desc.content == DELETION_VECTOR) {
199
0
            deletion_vector_files.emplace_back(desc);
200
0
        }
201
0
    }
202
203
0
    if (!equality_delete_files.empty()) {
204
0
        RETURN_IF_ERROR(_process_equality_delete(equality_delete_files));
205
0
        _file_format_reader->set_push_down_agg_type(TPushAggOp::NONE);
206
0
    }
207
208
0
    if (!deletion_vector_files.empty()) {
209
0
        if (deletion_vector_files.size() != 1) [[unlikely]] {
210
            /*
211
             * Deletion vectors are a binary representation of deletes for a single data file that is more efficient
212
             * at execution time than position delete files. Unlike equality or position delete files, there can be
213
             * at most one deletion vector for a given data file in a snapshot.
214
             */
215
0
            return Status::DataQualityError("This iceberg data file has multiple DVs.");
216
0
        }
217
0
        RETURN_IF_ERROR(
218
0
                read_deletion_vector(table_desc.original_file_path, deletion_vector_files[0]));
219
220
0
        _file_format_reader->set_push_down_agg_type(TPushAggOp::NONE);
221
        // Readers can safely ignore position delete files if there is a DV for a data file.
222
0
    } else if (!position_delete_files.empty()) {
223
0
        RETURN_IF_ERROR(
224
0
                _position_delete_base(table_desc.original_file_path, position_delete_files));
225
0
        _file_format_reader->set_push_down_agg_type(TPushAggOp::NONE);
226
0
    }
227
228
0
    COUNTER_UPDATE(_iceberg_profile.num_delete_files, table_desc.delete_files.size());
229
0
    return Status::OK();
230
0
}
231
232
void IcebergTableReader::_generate_equality_delete_block(
233
        Block* block, const std::vector<std::string>& equality_delete_col_names,
234
0
        const std::vector<DataTypePtr>& equality_delete_col_types) {
235
0
    for (int i = 0; i < equality_delete_col_names.size(); ++i) {
236
0
        DataTypePtr data_type = make_nullable(equality_delete_col_types[i]);
237
0
        MutableColumnPtr data_column = data_type->create_column();
238
0
        block->insert(ColumnWithTypeAndName(std::move(data_column), data_type,
239
0
                                            equality_delete_col_names[i]));
240
0
    }
241
0
}
242
243
2
Status IcebergTableReader::_expand_block_if_need(Block* block) {
244
2
    std::set<std::string> names;
245
2
    auto block_names = block->get_names();
246
2
    names.insert(block_names.begin(), block_names.end());
247
2
    for (auto& col : _expand_columns) {
248
0
        col.column->assume_mutable()->clear();
249
0
        if (names.contains(col.name)) {
250
0
            return Status::InternalError("Wrong expand column '{}'", col.name);
251
0
        }
252
0
        names.insert(col.name);
253
0
        (*_col_name_to_block_idx)[col.name] = static_cast<uint32_t>(block->columns());
254
0
        block->insert(col);
255
0
    }
256
2
    return Status::OK();
257
2
}
258
259
2
Status IcebergTableReader::_shrink_block_if_need(Block* block) {
260
2
    std::set<size_t> positions_to_erase;
261
2
    for (const std::string& expand_col : _expand_col_names) {
262
0
        if (!_col_name_to_block_idx->contains(expand_col)) {
263
0
            return Status::InternalError("Wrong erase column '{}', block: {}", expand_col,
264
0
                                         block->dump_names());
265
0
        }
266
0
        positions_to_erase.emplace((*_col_name_to_block_idx)[expand_col]);
267
0
    }
268
2
    block->erase(positions_to_erase);
269
2
    for (const std::string& expand_col : _expand_col_names) {
270
0
        _col_name_to_block_idx->erase(expand_col);
271
0
    }
272
2
    return Status::OK();
273
2
}
274
275
Status IcebergTableReader::_position_delete_base(
276
0
        const std::string data_file_path, const std::vector<TIcebergDeleteFileDesc>& delete_files) {
277
0
    std::vector<DeleteRows*> delete_rows_array;
278
0
    int64_t num_delete_rows = 0;
279
0
    for (const auto& delete_file : delete_files) {
280
0
        SCOPED_TIMER(_iceberg_profile.delete_files_read_time);
281
0
        Status create_status = Status::OK();
282
0
        auto* delete_file_cache = _kv_cache->get<DeleteFile>(
283
0
                _delet_file_cache_key(delete_file.path), [&]() -> DeleteFile* {
284
0
                    auto* position_delete = new DeleteFile;
285
0
                    create_status = _read_position_delete_file(delete_file, position_delete);
286
287
0
                    if (!create_status) {
288
0
                        return nullptr;
289
0
                    }
290
291
0
                    return position_delete;
292
0
                });
293
0
        if (create_status.is<ErrorCode::END_OF_FILE>()) {
294
0
            continue;
295
0
        } else if (!create_status.ok()) {
296
0
            return create_status;
297
0
        }
298
299
0
        DeleteFile& delete_file_map = *((DeleteFile*)delete_file_cache);
300
0
        auto get_value = [&](const auto& v) {
301
0
            DeleteRows* row_ids = v.second.get();
302
0
            if (!row_ids->empty()) {
303
0
                delete_rows_array.emplace_back(row_ids);
304
0
                num_delete_rows += row_ids->size();
305
0
            }
306
0
        };
307
0
        delete_file_map.if_contains(data_file_path, get_value);
308
0
    }
309
    // Use a KV cache to store the delete rows corresponding to a data file path.
310
    // The Parquet/ORC reader holds a reference (pointer) to this cached entry.
311
    // This allows delete rows to be reused when a single data file is split into
312
    // multiple splits, avoiding excessive memory usage when delete rows are large.
313
0
    if (num_delete_rows > 0) {
314
0
        SCOPED_TIMER(_iceberg_profile.delete_rows_sort_time);
315
0
        _iceberg_delete_rows =
316
0
                _kv_cache->get<DeleteRows>(data_file_path,
317
0
                                           [&]() -> DeleteRows* {
318
0
                                               auto* data_file_position_delete = new DeleteRows;
319
0
                                               _sort_delete_rows(delete_rows_array, num_delete_rows,
320
0
                                                                 *data_file_position_delete);
321
322
0
                                               return data_file_position_delete;
323
0
                                           }
324
325
0
                );
326
0
        set_delete_rows();
327
0
        COUNTER_UPDATE(_iceberg_profile.num_delete_rows, num_delete_rows);
328
0
    }
329
0
    return Status::OK();
330
0
}
331
332
Status IcebergTableReader::_read_position_delete_file(const TIcebergDeleteFileDesc& delete_file,
333
0
                                                      DeleteFile* position_delete) {
334
0
    GroupedDeleteRowsVisitor visitor(position_delete);
335
0
    IcebergDeleteFileReaderOptions options;
336
0
    options.state = _state;
337
0
    options.profile = _profile;
338
0
    options.scan_params = &_params;
339
0
    options.io_ctx = _io_ctx;
340
0
    options.meta_cache = _meta_cache;
341
0
    options.fs_name = &_range.fs_name;
342
0
    options.batch_size = READ_DELETE_FILE_BATCH_SIZE;
343
0
    return read_iceberg_position_delete_file(delete_file, options, &visitor);
344
0
}
345
346
/**
347
 * https://iceberg.apache.org/spec/#position-delete-files
348
 * The rows in the delete file must be sorted by file_path then position to optimize filtering rows while scanning.
349
 * Sorting by file_path allows filter pushdown by file in columnar storage formats.
350
 * Sorting by position allows filtering rows while scanning, to avoid keeping deletes in memory.
351
 */
352
void IcebergTableReader::_sort_delete_rows(
353
        const std::vector<std::vector<int64_t>*>& delete_rows_array, int64_t num_delete_rows,
354
0
        std::vector<int64_t>& result) {
355
0
    if (delete_rows_array.empty()) {
356
0
        return;
357
0
    }
358
0
    if (delete_rows_array.size() == 1) {
359
0
        result.resize(num_delete_rows);
360
0
        memcpy(result.data(), delete_rows_array.front()->data(), sizeof(int64_t) * num_delete_rows);
361
0
        return;
362
0
    }
363
0
    if (delete_rows_array.size() == 2) {
364
0
        result.resize(num_delete_rows);
365
0
        std::merge(delete_rows_array.front()->begin(), delete_rows_array.front()->end(),
366
0
                   delete_rows_array.back()->begin(), delete_rows_array.back()->end(),
367
0
                   result.begin());
368
0
        return;
369
0
    }
370
371
0
    using vec_pair = std::pair<std::vector<int64_t>::iterator, std::vector<int64_t>::iterator>;
372
0
    result.resize(num_delete_rows);
373
0
    auto row_id_iter = result.begin();
374
0
    auto iter_end = result.end();
375
0
    std::vector<vec_pair> rows_array;
376
0
    for (auto* rows : delete_rows_array) {
377
0
        if (!rows->empty()) {
378
0
            rows_array.emplace_back(rows->begin(), rows->end());
379
0
        }
380
0
    }
381
0
    size_t array_size = rows_array.size();
382
0
    while (row_id_iter != iter_end) {
383
0
        int64_t min_index = 0;
384
0
        int64_t min = *rows_array[0].first;
385
0
        for (size_t i = 0; i < array_size; ++i) {
386
0
            if (*rows_array[i].first < min) {
387
0
                min_index = i;
388
0
                min = *rows_array[i].first;
389
0
            }
390
0
        }
391
0
        *row_id_iter++ = min;
392
0
        rows_array[min_index].first++;
393
0
        if (UNLIKELY(rows_array[min_index].first == rows_array[min_index].second)) {
394
0
            rows_array.erase(rows_array.begin() + min_index);
395
0
            array_size--;
396
0
        }
397
0
    }
398
0
}
399
400
Status IcebergParquetReader::init_reader(
401
        const std::vector<std::string>& file_col_names,
402
        std::unordered_map<std::string, uint32_t>* col_name_to_block_idx,
403
        const VExprContextSPtrs& conjuncts,
404
        phmap::flat_hash_map<int, std::vector<std::shared_ptr<ColumnPredicate>>>&
405
                slot_id_to_predicates,
406
        const TupleDescriptor* tuple_descriptor, const RowDescriptor* row_descriptor,
407
        const std::unordered_map<std::string, int>* colname_to_slot_id,
408
        const VExprContextSPtrs* not_single_slot_filter_conjuncts,
409
1
        const std::unordered_map<int, VExprContextSPtrs>* slot_id_to_filter_conjuncts) {
410
1
    _file_format = Fileformat::PARQUET;
411
1
    _col_name_to_block_idx = col_name_to_block_idx;
412
1
    auto* parquet_reader = static_cast<ParquetReader*>(_file_format_reader.get());
413
1
    RETURN_IF_ERROR(parquet_reader->get_file_metadata_schema(&_data_file_field_desc));
414
1
    DCHECK(_data_file_field_desc != nullptr);
415
1
    if (_row_lineage_columns != nullptr) {
416
0
        const auto& table_desc = _range.table_format_params.iceberg_params;
417
0
        _row_lineage_columns->first_row_id =
418
0
                table_desc.__isset.first_row_id ? table_desc.first_row_id : -1;
419
0
        _row_lineage_columns->last_updated_sequence_number =
420
0
                table_desc.__isset.last_updated_sequence_number
421
0
                        ? table_desc.last_updated_sequence_number
422
0
                        : -1;
423
0
        parquet_reader->set_row_lineage_columns(_row_lineage_columns);
424
0
    }
425
426
1
    auto column_id_result = _create_column_ids(_data_file_field_desc, tuple_descriptor);
427
1
    auto& column_ids = column_id_result.column_ids;
428
1
    const auto& filter_column_ids = column_id_result.filter_column_ids;
429
430
1
    RETURN_IF_ERROR(init_row_filters());
431
1
    _all_required_col_names = file_col_names;
432
433
1
    if (!_params.__isset.history_schema_info || _params.history_schema_info.empty()) [[unlikely]] {
434
1
        RETURN_IF_ERROR(BuildTableInfoUtil::by_parquet_name(
435
1
                tuple_descriptor, *_data_file_field_desc, table_info_node_ptr));
436
1
    } else {
437
0
        std::set<std::string> read_col_name_set(file_col_names.begin(), file_col_names.end());
438
439
0
        bool exist_field_id = true;
440
0
        for (int idx = 0; idx < _data_file_field_desc->size(); idx++) {
441
0
            if (_data_file_field_desc->get_column(idx)->field_id == -1) {
442
                // the data file may be from hive table migrated to iceberg, field id is missing
443
0
                exist_field_id = false;
444
0
                break;
445
0
            }
446
0
        }
447
0
        const auto& table_schema = _params.history_schema_info.front().root_field;
448
449
0
        table_info_node_ptr = std::make_shared<TableSchemaChangeHelper::StructNode>();
450
0
        if (exist_field_id) {
451
            // id -> table column name. columns that need read data file.
452
0
            std::unordered_map<int, std::shared_ptr<schema::external::TField>> id_to_table_field;
453
0
            for (const auto& table_field : table_schema.fields) {
454
0
                auto field = table_field.field_ptr;
455
0
                DCHECK(field->__isset.name);
456
0
                if (!read_col_name_set.contains(field->name)) {
457
0
                    continue;
458
0
                }
459
0
                id_to_table_field.emplace(field->id, field);
460
0
            }
461
462
0
            for (int idx = 0; idx < _data_file_field_desc->size(); idx++) {
463
0
                const auto& data_file_field = _data_file_field_desc->get_column(idx);
464
0
                auto data_file_column_id = _data_file_field_desc->get_column(idx)->field_id;
465
466
0
                if (id_to_table_field.contains(data_file_column_id)) {
467
0
                    const auto& table_field = id_to_table_field[data_file_column_id];
468
469
0
                    std::shared_ptr<TableSchemaChangeHelper::Node> field_node = nullptr;
470
0
                    RETURN_IF_ERROR(BuildTableInfoUtil::by_parquet_field_id(
471
0
                            *table_field, *data_file_field, exist_field_id, field_node));
472
0
                    table_info_node_ptr->add_children(table_field->name, data_file_field->name,
473
0
                                                      field_node);
474
475
0
                    _id_to_block_column_name.emplace(data_file_column_id, table_field->name);
476
0
                    id_to_table_field.erase(data_file_column_id);
477
0
                } else if (_equality_delete_col_ids.contains(data_file_column_id)) {
478
                    // Columns that need to be read for equality delete.
479
0
                    const static std::string EQ_DELETE_PRE = "__equality_delete_column__";
480
481
                    // Construct table column names that avoid duplication with current table schema.
482
                    // As the columns currently being read may have been deleted in the latest
483
                    // table structure or have undergone a series of schema changes...
484
0
                    std::string table_column_name = EQ_DELETE_PRE + data_file_field->name;
485
0
                    table_info_node_ptr->add_children(
486
0
                            table_column_name, data_file_field->name,
487
0
                            std::make_shared<TableSchemaChangeHelper::ConstNode>());
488
489
0
                    _id_to_block_column_name.emplace(data_file_column_id, table_column_name);
490
0
                    _expand_col_names.emplace_back(table_column_name);
491
0
                    auto expand_data_type = make_nullable(data_file_field->data_type);
492
0
                    _expand_columns.emplace_back(
493
0
                            ColumnWithTypeAndName {expand_data_type->create_column(),
494
0
                                                   expand_data_type, table_column_name});
495
496
0
                    _all_required_col_names.emplace_back(table_column_name);
497
0
                    column_ids.insert(data_file_field->get_column_id());
498
0
                }
499
0
            }
500
0
            for (const auto& [id, table_field] : id_to_table_field) {
501
0
                table_info_node_ptr->add_not_exist_children(table_field->name);
502
0
            }
503
0
        } else {
504
0
            if (!_equality_delete_col_ids.empty()) [[unlikely]] {
505
0
                return Status::InternalError(
506
0
                        "Can not read missing field id data file when have equality delete");
507
0
            }
508
0
            std::map<std::string, size_t> file_column_idx_map;
509
0
            for (size_t idx = 0; idx < _data_file_field_desc->size(); idx++) {
510
0
                file_column_idx_map.emplace(_data_file_field_desc->get_column(idx)->name, idx);
511
0
            }
512
513
0
            for (const auto& table_field : table_schema.fields) {
514
0
                DCHECK(table_field.__isset.field_ptr);
515
0
                DCHECK(table_field.field_ptr->__isset.name);
516
0
                const auto& table_column_name = table_field.field_ptr->name;
517
0
                if (!read_col_name_set.contains(table_column_name)) {
518
0
                    continue;
519
0
                }
520
0
                if (!table_field.field_ptr->__isset.name_mapping ||
521
0
                    table_field.field_ptr->name_mapping.size() == 0) {
522
0
                    return Status::DataQualityError(
523
0
                            "name_mapping must be set when read missing field id data file.");
524
0
                }
525
0
                bool have_mapping = false;
526
0
                for (const auto& mapped_name : table_field.field_ptr->name_mapping) {
527
0
                    if (file_column_idx_map.contains(mapped_name)) {
528
0
                        std::shared_ptr<TableSchemaChangeHelper::Node> field_node = nullptr;
529
0
                        const auto& file_field = _data_file_field_desc->get_column(
530
0
                                file_column_idx_map.at(mapped_name));
531
0
                        RETURN_IF_ERROR(BuildTableInfoUtil::by_parquet_field_id(
532
0
                                *table_field.field_ptr, *file_field, exist_field_id, field_node));
533
0
                        table_info_node_ptr->add_children(table_column_name, file_field->name,
534
0
                                                          field_node);
535
0
                        have_mapping = true;
536
0
                        break;
537
0
                    }
538
0
                }
539
0
                if (!have_mapping) {
540
0
                    table_info_node_ptr->add_not_exist_children(table_column_name);
541
0
                }
542
0
            }
543
0
        }
544
0
    }
545
546
1
    return parquet_reader->init_reader(
547
1
            _all_required_col_names, _col_name_to_block_idx, conjuncts, slot_id_to_predicates,
548
1
            tuple_descriptor, row_descriptor, colname_to_slot_id, not_single_slot_filter_conjuncts,
549
1
            slot_id_to_filter_conjuncts, table_info_node_ptr, true, column_ids, filter_column_ids);
550
1
}
551
552
ColumnIdResult IcebergParquetReader::_create_column_ids(const FieldDescriptor* field_desc,
553
7
                                                        const TupleDescriptor* tuple_descriptor) {
554
    // First, assign column IDs to the field descriptor
555
7
    auto* mutable_field_desc = const_cast<FieldDescriptor*>(field_desc);
556
7
    mutable_field_desc->assign_ids();
557
558
    // map top-level table column iceberg_id -> FieldSchema*
559
7
    std::unordered_map<int, const FieldSchema*> iceberg_id_to_field_schema_map;
560
561
58
    for (int i = 0; i < field_desc->size(); ++i) {
562
51
        auto field_schema = field_desc->get_column(i);
563
51
        if (!field_schema) continue;
564
565
51
        int iceberg_id = field_schema->field_id;
566
51
        iceberg_id_to_field_schema_map[iceberg_id] = field_schema;
567
51
    }
568
569
7
    std::set<uint64_t> column_ids;
570
7
    std::set<uint64_t> filter_column_ids;
571
572
    // helper to process access paths for a given top-level parquet field
573
7
    auto process_access_paths = [](const FieldSchema* parquet_field,
574
7
                                   const std::vector<TColumnAccessPath>& access_paths,
575
14
                                   std::set<uint64_t>& out_ids) {
576
14
        process_nested_access_paths(
577
14
                parquet_field, access_paths, out_ids,
578
14
                [](const FieldSchema* field) { return field->get_column_id(); },
579
14
                [](const FieldSchema* field) { return field->get_max_column_id(); },
580
14
                IcebergParquetNestedColumnUtils::extract_nested_column_ids);
581
14
    };
582
583
15
    for (const auto* slot : tuple_descriptor->slots()) {
584
15
        auto it = iceberg_id_to_field_schema_map.find(slot->col_unique_id());
585
15
        if (it == iceberg_id_to_field_schema_map.end()) {
586
            // Column not found in file (e.g., partition column, added column)
587
0
            continue;
588
0
        }
589
15
        auto field_schema = it->second;
590
591
        // primitive (non-nested) types: direct mapping by name
592
15
        if ((slot->col_type() != TYPE_STRUCT && slot->col_type() != TYPE_ARRAY &&
593
15
             slot->col_type() != TYPE_MAP)) {
594
7
            column_ids.insert(field_schema->column_id);
595
596
7
            if (slot->is_predicate()) {
597
0
                filter_column_ids.insert(field_schema->column_id);
598
0
            }
599
7
            continue;
600
7
        }
601
602
        // complex types:
603
8
        const auto& all_access_paths = slot->all_access_paths();
604
8
        process_access_paths(field_schema, all_access_paths, column_ids);
605
606
8
        const auto& predicate_access_paths = slot->predicate_access_paths();
607
8
        if (!predicate_access_paths.empty()) {
608
6
            process_access_paths(field_schema, predicate_access_paths, filter_column_ids);
609
6
        }
610
8
    }
611
7
    return ColumnIdResult(std::move(column_ids), std::move(filter_column_ids));
612
7
}
613
614
/// Initializes the underlying OrcReader for one Iceberg ORC data file.
///
/// Steps, in order:
///   1. Fetch the file's physical ORC schema (type tree + parsed names/types).
///   2. Optionally wire row-lineage metadata into the ORC reader.
///   3. Compute the physical ORC column ids to read and to filter on.
///   4. Build the schema-change mapping (`table_info_node_ptr`), choosing one of
///      three strategies:
///        - no usable history schema info -> match columns by name;
///        - file carries iceberg field ids -> match by field id (also expanding
///          extra columns required by equality-delete files);
///        - file lacks field ids -> match via the table's `name_mapping`.
///   5. Delegate to OrcReader::init_reader with the resolved column set.
///
/// @param file_col_names       table-level column names requested by the scan.
/// @param col_name_to_block_idx  output-block index per column name (borrowed).
/// @param conjuncts            scan predicates pushed to the file reader.
/// @param tuple_descriptor / row_descriptor  slot layout of the output tuple.
/// @param colname_to_slot_id, *_filter_conjuncts  forwarded filter metadata.
/// @return Status of schema resolution and reader initialization.
Status IcebergOrcReader::init_reader(
        const std::vector<std::string>& file_col_names,
        std::unordered_map<std::string, uint32_t>* col_name_to_block_idx,
        const VExprContextSPtrs& conjuncts, const TupleDescriptor* tuple_descriptor,
        const RowDescriptor* row_descriptor,
        const std::unordered_map<std::string, int>* colname_to_slot_id,
        const VExprContextSPtrs* not_single_slot_filter_conjuncts,
        const std::unordered_map<int, VExprContextSPtrs>* slot_id_to_filter_conjuncts) {
    _file_format = Fileformat::ORC;
    _col_name_to_block_idx = col_name_to_block_idx;
    // _file_format_reader was created as an OrcReader by the caller; downcast is safe
    // by construction. NOTE(review): static_cast relies on that invariant — confirm.
    auto* orc_reader = static_cast<OrcReader*>(_file_format_reader.get());
    RETURN_IF_ERROR(orc_reader->get_file_type(&_data_file_type_desc));
    std::vector<std::string> data_file_col_names;
    std::vector<DataTypePtr> data_file_col_types;
    RETURN_IF_ERROR(orc_reader->get_parsed_schema(&data_file_col_names, &data_file_col_types));
    if (_row_lineage_columns != nullptr) {
        // Row lineage (first_row_id / last_updated_sequence_number) comes from the
        // FE-provided iceberg params; -1 marks "not set".
        const auto& table_desc = _range.table_format_params.iceberg_params;
        _row_lineage_columns->first_row_id =
                table_desc.__isset.first_row_id ? table_desc.first_row_id : -1;
        _row_lineage_columns->last_updated_sequence_number =
                table_desc.__isset.last_updated_sequence_number
                        ? table_desc.last_updated_sequence_number
                        : -1;
        orc_reader->set_row_lineage_columns(_row_lineage_columns);
    }

    // Physical ORC column ids derived from the slots (see _create_column_ids).
    auto column_id_result = _create_column_ids(_data_file_type_desc, tuple_descriptor);
    auto& column_ids = column_id_result.column_ids;
    const auto& filter_column_ids = column_id_result.filter_column_ids;

    // Position/equality delete handling may populate _equality_delete_col_ids,
    // which the field-id matching below consults.
    RETURN_IF_ERROR(init_row_filters());

    _all_required_col_names = file_col_names;
    if (!_params.__isset.history_schema_info || _params.history_schema_info.empty()) [[unlikely]] {
        // No historical schema available: fall back to matching by column name.
        RETURN_IF_ERROR(BuildTableInfoUtil::by_orc_name(tuple_descriptor, _data_file_type_desc,
                                                        table_info_node_ptr));
    } else {
        std::set<std::string> read_col_name_set(file_col_names.begin(), file_col_names.end());

        // A file is only field-id matchable if EVERY top-level field carries the
        // iceberg id attribute.
        bool exist_field_id = true;
        for (size_t idx = 0; idx < _data_file_type_desc->getSubtypeCount(); idx++) {
            if (!_data_file_type_desc->getSubtype(idx)->hasAttributeKey(ICEBERG_ORC_ATTRIBUTE)) {
                exist_field_id = false;
                break;
            }
        }

        const auto& table_schema = _params.history_schema_info.front().root_field;
        table_info_node_ptr = std::make_shared<TableSchemaChangeHelper::StructNode>();
        if (exist_field_id) {
            // id -> table column name. columns that need read data file.
            std::unordered_map<int, std::shared_ptr<schema::external::TField>> id_to_table_field;
            for (const auto& table_field : table_schema.fields) {
                auto field = table_field.field_ptr;
                DCHECK(field->__isset.name);
                if (!read_col_name_set.contains(field->name)) {
                    continue;
                }

                id_to_table_field.emplace(field->id, field);
            }

            // NOTE(review): int loop index vs. unsigned getSubtypeCount() —
            // signed/unsigned compare; presumably field counts stay small.
            for (int idx = 0; idx < _data_file_type_desc->getSubtypeCount(); idx++) {
                const auto& data_file_field = _data_file_type_desc->getSubtype(idx);
                auto data_file_column_id =
                        std::stoi(data_file_field->getAttributeValue(ICEBERG_ORC_ATTRIBUTE));
                auto const& file_column_name = _data_file_type_desc->getFieldName(idx);

                if (id_to_table_field.contains(data_file_column_id)) {
                    const auto& table_field = id_to_table_field[data_file_column_id];

                    std::shared_ptr<TableSchemaChangeHelper::Node> field_node = nullptr;
                    RETURN_IF_ERROR(BuildTableInfoUtil::by_orc_field_id(
                            *table_field, data_file_field, ICEBERG_ORC_ATTRIBUTE, exist_field_id,
                            field_node));
                    table_info_node_ptr->add_children(table_field->name, file_column_name,
                                                      field_node);

                    _id_to_block_column_name.emplace(data_file_column_id, table_field->name);
                    // Erase so the leftover entries below become "not exist" children.
                    id_to_table_field.erase(data_file_column_id);
                } else if (_equality_delete_col_ids.contains(data_file_column_id)) {
                    // Columns that need to be read for equality delete.
                    const static std::string EQ_DELETE_PRE = "__equality_delete_column__";

                    // Construct table column names that avoid duplication with current table schema.
                    // As the columns currently being read may have been deleted in the latest
                    // table structure or have undergone a series of schema changes...
                    std::string table_column_name = EQ_DELETE_PRE + file_column_name;
                    table_info_node_ptr->add_children(
                            table_column_name, file_column_name,
                            std::make_shared<TableSchemaChangeHelper::ConstNode>());

                    _id_to_block_column_name.emplace(data_file_column_id, table_column_name);
                    _expand_col_names.emplace_back(table_column_name);

                    // Materialize a nullable column so the delete comparison can run
                    // even though the scan output does not include this column.
                    auto expand_data_type = make_nullable(data_file_col_types[idx]);
                    _expand_columns.emplace_back(
                            ColumnWithTypeAndName {expand_data_type->create_column(),
                                                   expand_data_type, table_column_name});

                    _all_required_col_names.emplace_back(table_column_name);
                    column_ids.insert(data_file_field->getColumnId());
                }
            }
            // Requested table columns with no counterpart in the file.
            for (const auto& [id, table_field] : id_to_table_field) {
                table_info_node_ptr->add_not_exist_children(table_field->name);
            }
        } else {
            if (!_equality_delete_col_ids.empty()) [[unlikely]] {
                return Status::InternalError(
                        "Can not read missing field id data file when have equality delete");
            }
            // No field ids in the file: resolve columns through the table's
            // name_mapping (Iceberg spec "schema.name-mapping.default").
            std::map<std::string, size_t> file_column_idx_map;
            for (int idx = 0; idx < _data_file_type_desc->getSubtypeCount(); idx++) {
                auto const& file_column_name = _data_file_type_desc->getFieldName(idx);
                file_column_idx_map.emplace(file_column_name, idx);
            }

            for (const auto& table_field : table_schema.fields) {
                DCHECK(table_field.__isset.field_ptr);
                DCHECK(table_field.field_ptr->__isset.name);
                const auto& table_column_name = table_field.field_ptr->name;
                if (!read_col_name_set.contains(table_column_name)) {
                    continue;
                }
                if (!table_field.field_ptr->__isset.name_mapping ||
                    table_field.field_ptr->name_mapping.size() == 0) {
                    return Status::DataQualityError(
                            "name_mapping must be set when read missing field id data file.");
                }
                // First mapped name that exists in the file wins.
                auto have_mapping = false;
                for (const auto& mapped_name : table_field.field_ptr->name_mapping) {
                    if (file_column_idx_map.contains(mapped_name)) {
                        auto file_column_idx = file_column_idx_map.at(mapped_name);
                        std::shared_ptr<TableSchemaChangeHelper::Node> field_node = nullptr;
                        const auto& file_field = _data_file_type_desc->getSubtype(file_column_idx);
                        RETURN_IF_ERROR(BuildTableInfoUtil::by_orc_field_id(
                                *table_field.field_ptr, file_field, ICEBERG_ORC_ATTRIBUTE,
                                exist_field_id, field_node));
                        table_info_node_ptr->add_children(
                                table_column_name,
                                _data_file_type_desc->getFieldName(file_column_idx), field_node);
                        have_mapping = true;
                        break;
                    }
                }
                if (!have_mapping) {
                    table_info_node_ptr->add_not_exist_children(table_column_name);
                }
            }
        }
    }

    return orc_reader->init_reader(&_all_required_col_names, _col_name_to_block_idx, conjuncts,
                                   false, tuple_descriptor, row_descriptor,
                                   not_single_slot_filter_conjuncts, slot_id_to_filter_conjuncts,
                                   table_info_node_ptr, column_ids, filter_column_ids);
}
772
773
/// Derives the set of physical ORC column ids to read, and the subset needed
/// for predicate evaluation, from the tuple's slots.
///
/// Top-level file fields are indexed by their iceberg field id (stored as the
/// ICEBERG_ORC_ATTRIBUTE on each subtype). Slots with no matching file field
/// are skipped. Primitive slots contribute a single column id; nested slots
/// (struct/array/map) are pruned through their access paths.
ColumnIdResult IcebergOrcReader::_create_column_ids(const orc::Type* orc_type,
                                                    const TupleDescriptor* tuple_descriptor) {
    // Index the file's top-level fields by iceberg field id.
    std::unordered_map<int, const orc::Type*> field_id_to_type;
    const uint64_t subtype_count = orc_type->getSubtypeCount();
    for (uint64_t idx = 0; idx < subtype_count; ++idx) {
        const orc::Type* sub_type = orc_type->getSubtype(idx);
        if (sub_type == nullptr || !sub_type->hasAttributeKey(ICEBERG_ORC_ATTRIBUTE)) {
            continue;
        }
        field_id_to_type[std::stoi(sub_type->getAttributeValue(ICEBERG_ORC_ATTRIBUTE))] = sub_type;
    }

    std::set<uint64_t> column_ids;
    std::set<uint64_t> filter_column_ids;

    // Shared helper: expand one top-level field's access paths into physical ids.
    auto collect_ids = [](const orc::Type* root,
                          const std::vector<TColumnAccessPath>& paths,
                          std::set<uint64_t>& ids) {
        process_nested_access_paths(
                root, paths, ids,
                [](const orc::Type* type) { return type->getColumnId(); },
                [](const orc::Type* type) { return type->getMaximumColumnId(); },
                IcebergOrcNestedColumnUtils::extract_nested_column_ids);
    };

    for (const auto* slot : tuple_descriptor->slots()) {
        auto found = field_id_to_type.find(slot->col_unique_id());
        if (found == field_id_to_type.end()) {
            // No matching field in the data file for this slot.
            continue;
        }
        const orc::Type* orc_field = found->second;

        const bool is_nested = slot->col_type() == TYPE_STRUCT ||
                               slot->col_type() == TYPE_ARRAY || slot->col_type() == TYPE_MAP;
        if (!is_nested) {
            // Primitive column: exactly one physical column id.
            column_ids.insert(orc_field->getColumnId());
            if (slot->is_predicate()) {
                filter_column_ids.insert(orc_field->getColumnId());
            }
            continue;
        }

        // Nested column: prune via access paths.
        collect_ids(orc_field, slot->all_access_paths(), column_ids);

        const auto& predicate_paths = slot->predicate_access_paths();
        if (!predicate_paths.empty()) {
            collect_ids(orc_field, predicate_paths, filter_column_ids);
        }
    }

    return ColumnIdResult(std::move(column_ids), std::move(filter_column_ids));
}
832
833
// Directly read the deletion vector using the `content_offset` and
834
// `content_size_in_bytes` provided by FE in `delete_file_desc`.
835
// These two fields indicate the location of a blob in storage.
836
// Since the current format is `deletion-vector-v1`, which does not
837
// compress any blobs, we can temporarily skip parsing the Puffin footer.
838
Status IcebergTableReader::read_deletion_vector(const std::string& data_file_path,
839
0
                                                const TIcebergDeleteFileDesc& delete_file_desc) {
840
0
    Status create_status = Status::OK();
841
0
    SCOPED_TIMER(_iceberg_profile.delete_files_read_time);
842
0
    _iceberg_delete_rows = _kv_cache->get<DeleteRows>(data_file_path, [&]() -> DeleteRows* {
843
0
        auto* delete_rows = new DeleteRows;
844
845
0
        TFileRangeDesc delete_range;
846
        // must use __set() method to make sure __isset is true
847
0
        delete_range.__set_fs_name(_range.fs_name);
848
0
        delete_range.path = delete_file_desc.path;
849
0
        delete_range.start_offset = delete_file_desc.content_offset;
850
0
        delete_range.size = delete_file_desc.content_size_in_bytes;
851
0
        delete_range.file_size = -1;
852
853
        // We may consider caching the DeletionVectorReader when reading Puffin files,
854
        // where the underlying reader is an `InMemoryFileReader` and a single data file is
855
        // split into multiple splits. However, we need to ensure that the underlying
856
        // reader supports multi-threaded access.
857
0
        DeletionVectorReader dv_reader(_state, _profile, _params, delete_range, _io_ctx);
858
0
        create_status = dv_reader.open();
859
0
        if (!create_status.ok()) [[unlikely]] {
860
0
            return nullptr;
861
0
        }
862
863
0
        size_t buffer_size = delete_range.size;
864
0
        std::vector<char> buf(buffer_size);
865
0
        if (buffer_size < 12) [[unlikely]] {
866
            // Minimum size: 4 bytes length + 4 bytes magic + 4 bytes CRC32
867
0
            create_status = Status::DataQualityError("Deletion vector file size too small: {}",
868
0
                                                     buffer_size);
869
0
            return nullptr;
870
0
        }
871
872
0
        create_status = dv_reader.read_at(delete_range.start_offset, {buf.data(), buffer_size});
873
0
        if (!create_status) [[unlikely]] {
874
0
            return nullptr;
875
0
        }
876
        // The serialized blob contains:
877
        //
878
        // Combined length of the vector and magic bytes stored as 4 bytes, big-endian
879
        // A 4-byte magic sequence, D1 D3 39 64
880
        // The vector, serialized as described below
881
        // A CRC-32 checksum of the magic bytes and serialized vector as 4 bytes, big-endian
882
883
0
        auto total_length = BigEndian::Load32(buf.data());
884
0
        if (total_length + 8 != buffer_size) [[unlikely]] {
885
0
            create_status = Status::DataQualityError(
886
0
                    "Deletion vector length mismatch, expected: {}, actual: {}", total_length + 8,
887
0
                    buffer_size);
888
0
            return nullptr;
889
0
        }
890
891
0
        constexpr static char MAGIC_NUMBER[] = {'\xD1', '\xD3', '\x39', '\x64'};
892
0
        if (memcmp(buf.data() + sizeof(total_length), MAGIC_NUMBER, 4)) [[unlikely]] {
893
0
            create_status = Status::DataQualityError("Deletion vector magic number mismatch");
894
0
            return nullptr;
895
0
        }
896
897
0
        roaring::Roaring64Map bitmap;
898
0
        SCOPED_TIMER(_iceberg_profile.parse_delete_file_time);
899
0
        try {
900
0
            bitmap = roaring::Roaring64Map::readSafe(buf.data() + 8, buffer_size - 12);
901
0
        } catch (const std::runtime_error& e) {
902
0
            create_status = Status::DataQualityError("Decode roaring bitmap failed, {}", e.what());
903
0
            return nullptr;
904
0
        }
905
        // skip CRC-32 checksum
906
907
0
        delete_rows->reserve(bitmap.cardinality());
908
0
        for (auto it = bitmap.begin(); it != bitmap.end(); it++) {
909
0
            delete_rows->push_back(*it);
910
0
        }
911
0
        COUNTER_UPDATE(_iceberg_profile.num_delete_rows, delete_rows->size());
912
0
        return delete_rows;
913
0
    });
914
915
0
    RETURN_IF_ERROR(create_status);
916
0
    if (!_iceberg_delete_rows->empty()) [[likely]] {
917
0
        set_delete_rows();
918
0
    }
919
0
    return Status::OK();
920
0
}
921
922
// Similar to the code structure of IcebergOrcReader::_process_equality_delete,
// but considering the significant differences in how parquet/orc obtains
// attributes/column IDs, it is not easy to combine them.
//
// Reads every Parquet equality-delete file into in-memory blocks, grouped by
// the set of delete-key field ids, then builds one EqualityDeleteBase impl per
// distinct key set. Side effects: populates _equality_delete_col_ids,
// _equality_delete_block_map, _equality_delete_blocks, _equality_delete_impls.
//
// @param delete_files  FE-provided equality-delete file descriptors (each must
//                      carry the field_ids it deletes on).
// @return Status; DataQualityError/InternalError on malformed delete metadata.
Status IcebergParquetReader::_process_equality_delete(
        const std::vector<TIcebergDeleteFileDesc>& delete_files) {
    // Intentionally empty: delete files have no partition/missing columns to fill.
    std::unordered_map<std::string, std::tuple<std::string, const SlotDescriptor*>>
            partition_columns;
    std::unordered_map<std::string, VExprContextSPtr> missing_columns;

    // Map the data file's iceberg field ids to its Parquet field schemas; every
    // field must carry a valid id or delete keys cannot be matched.
    std::map<int, const FieldSchema*> data_file_id_to_field_schema;
    for (int idx = 0; idx < _data_file_field_desc->size(); ++idx) {
        auto field_schema = _data_file_field_desc->get_column(idx);
        if (_data_file_field_desc->get_column(idx)->field_id == -1) {
            return Status::DataQualityError("Iceberg equality delete data file missing field id.");
        }
        data_file_id_to_field_schema[_data_file_field_desc->get_column(idx)->field_id] =
                field_schema;
    }

    for (const auto& delete_file : delete_files) {
        // Read the whole delete file (start 0, size -1 == unbounded).
        TFileRangeDesc delete_desc;
        // must use __set() method to make sure __isset is true
        delete_desc.__set_fs_name(_range.fs_name);
        delete_desc.path = delete_file.path;
        delete_desc.start_offset = 0;
        delete_desc.size = -1;
        delete_desc.file_size = -1;

        if (!delete_file.__isset.field_ids) [[unlikely]] {
            return Status::InternalError(
                    "missing delete field ids when reading equality delete file");
        }
        auto& read_column_field_ids = delete_file.field_ids;
        // Working set: ids still unmatched; also record them globally so the data
        // file reader knows to materialize these columns.
        std::set<int> read_column_field_ids_set;
        for (const auto& field_id : read_column_field_ids) {
            read_column_field_ids_set.insert(field_id);
            _equality_delete_col_ids.insert(field_id);
        }

        auto delete_reader = ParquetReader::create_unique(
                _profile, _params, delete_desc, READ_DELETE_FILE_BATCH_SIZE,
                &_state->timezone_obj(), _io_ctx, _state, _meta_cache);
        RETURN_IF_ERROR(delete_reader->init_schema_reader());

        // the column that to read equality delete file.
        // (delete file may be have extra columns that don't need to read)
        std::vector<std::string> delete_col_names;
        std::vector<DataTypePtr> delete_col_types;
        std::vector<int> delete_col_ids;
        std::unordered_map<std::string, uint32_t> delete_col_name_to_block_idx;

        const FieldDescriptor* delete_field_desc = nullptr;
        RETURN_IF_ERROR(delete_reader->get_file_metadata_schema(&delete_field_desc));
        DCHECK(delete_field_desc != nullptr);

        // Name-mapping node for the delete file (lower-cased name -> file name).
        auto eq_file_node = std::make_shared<TableSchemaChangeHelper::StructNode>();
        for (const auto& delete_file_field : delete_field_desc->get_fields_schema()) {
            if (delete_file_field.field_id == -1) [[unlikely]] { // missing delete_file_field id
                // equality delete file must have delete_file_field id to match column.
                return Status::DataQualityError(
                        "missing delete_file_field id when reading equality delete file");
            } else if (read_column_field_ids_set.contains(delete_file_field.field_id)) {
                // the column that need to read.
                if (delete_file_field.children.size() > 0) [[unlikely]] { // complex column
                    return Status::InternalError(
                            "can not support read complex column in equality delete file");
                } else if (!data_file_id_to_field_schema.contains(delete_file_field.field_id))
                        [[unlikely]] {
                    return Status::DataQualityError(
                            "can not find delete field id in data file schema when reading "
                            "equality delete file");
                }
                // The delete key's type must match the data file's type exactly
                // (type promotion across schema evolution is not supported here).
                auto data_file_field = data_file_id_to_field_schema[delete_file_field.field_id];
                if (data_file_field->data_type->get_primitive_type() !=
                    delete_file_field.data_type->get_primitive_type()) [[unlikely]] {
                    return Status::NotSupported(
                            "Not Support type change in equality delete, field: {}, delete "
                            "file type: {}, data file type: {}",
                            delete_file_field.field_id, delete_file_field.data_type->get_name(),
                            data_file_field->data_type->get_name());
                }

                std::string filed_lower_name = to_lower(delete_file_field.name);
                eq_file_node->add_children(filed_lower_name, delete_file_field.name,
                                           std::make_shared<TableSchemaChangeHelper::ScalarNode>());

                delete_col_ids.emplace_back(delete_file_field.field_id);
                delete_col_names.emplace_back(filed_lower_name);
                // Nullable so block merging is uniform regardless of file nullability.
                delete_col_types.emplace_back(make_nullable(delete_file_field.data_type));

                // Matched: remove from the working set.
                read_column_field_ids_set.erase(delete_file_field.field_id);
            } else {
                // delete file may be have extra columns that don't need to read
            }
        }
        // Any id left unmatched means the delete file is inconsistent with FE metadata.
        if (!read_column_field_ids_set.empty()) [[unlikely]] {
            return Status::DataQualityError("some field ids not found in equality delete file.");
        }

        for (uint32_t idx = 0; idx < delete_col_names.size(); ++idx) {
            delete_col_name_to_block_idx[delete_col_names[idx]] = idx;
        }
        // Empty predicate map: delete files are read in full, unfiltered.
        phmap::flat_hash_map<int, std::vector<std::shared_ptr<ColumnPredicate>>> tmp;
        RETURN_IF_ERROR(delete_reader->init_reader(delete_col_names, &delete_col_name_to_block_idx,
                                                   {}, tmp, nullptr, nullptr, nullptr, nullptr,
                                                   nullptr, eq_file_node, false));
        RETURN_IF_ERROR(delete_reader->set_fill_columns(partition_columns, missing_columns));

        // Delete files sharing the same key-id vector accumulate into one block.
        if (!_equality_delete_block_map.contains(delete_col_ids)) {
            _equality_delete_block_map.emplace(delete_col_ids, _equality_delete_blocks.size());
            Block block;
            _generate_equality_delete_block(&block, delete_col_names, delete_col_types);
            _equality_delete_blocks.emplace_back(block);
        }
        Block& eq_file_block = _equality_delete_blocks[_equality_delete_block_map[delete_col_ids]];
        bool eof = false;
        // Drain the delete file batch by batch, merging rows into the shared block.
        while (!eof) {
            Block tmp_block;
            _generate_equality_delete_block(&tmp_block, delete_col_names, delete_col_types);
            size_t read_rows = 0;
            RETURN_IF_ERROR(delete_reader->get_next_block(&tmp_block, &read_rows, &eof));
            if (read_rows > 0) {
                MutableBlock mutable_block(&eq_file_block);
                RETURN_IF_ERROR(mutable_block.merge(tmp_block));
            }
        }
    }

    // One delete implementation per distinct key-id set.
    for (const auto& [delete_col_ids, block_idx] : _equality_delete_block_map) {
        auto& eq_file_block = _equality_delete_blocks[block_idx];
        auto equality_delete_impl =
                EqualityDeleteBase::get_delete_impl(&eq_file_block, delete_col_ids);
        RETURN_IF_ERROR(equality_delete_impl->init(_profile));
        _equality_delete_impls.emplace_back(std::move(equality_delete_impl));
    }
    return Status::OK();
}
1059
1060
// ORC counterpart of IcebergParquetReader::_process_equality_delete: reads every
// ORC equality-delete file into in-memory blocks keyed by the delete-key field-id
// set, then builds one EqualityDeleteBase impl per distinct key set. Field ids
// come from the ICEBERG_ORC_ATTRIBUTE on each ORC subtype (vs. Parquet field_id).
// Side effects: populates _equality_delete_col_ids, _equality_delete_block_map,
// _equality_delete_blocks, _equality_delete_impls.
//
// @param delete_files  FE-provided equality-delete file descriptors (each must
//                      carry the field_ids it deletes on).
// @return Status; DataQualityError/InternalError on malformed delete metadata.
Status IcebergOrcReader::_process_equality_delete(
        const std::vector<TIcebergDeleteFileDesc>& delete_files) {
    // Intentionally empty: delete files have no partition/missing columns to fill.
    std::unordered_map<std::string, std::tuple<std::string, const SlotDescriptor*>>
            partition_columns;
    std::unordered_map<std::string, VExprContextSPtr> missing_columns;

    // Map the data file's iceberg field ids to top-level subtype indexes; every
    // field must carry the id attribute or delete keys cannot be matched.
    std::map<int, int> data_file_id_to_field_idx;
    for (int idx = 0; idx < _data_file_type_desc->getSubtypeCount(); ++idx) {
        if (!_data_file_type_desc->getSubtype(idx)->hasAttributeKey(ICEBERG_ORC_ATTRIBUTE)) {
            return Status::DataQualityError("Iceberg equality delete data file missing field id.");
        }
        auto field_id = std::stoi(
                _data_file_type_desc->getSubtype(idx)->getAttributeValue(ICEBERG_ORC_ATTRIBUTE));
        data_file_id_to_field_idx[field_id] = idx;
    }

    for (const auto& delete_file : delete_files) {
        // Read the whole delete file (start 0, size -1 == unbounded).
        TFileRangeDesc delete_desc;
        // must use __set() method to make sure __isset is true
        delete_desc.__set_fs_name(_range.fs_name);
        delete_desc.path = delete_file.path;
        delete_desc.start_offset = 0;
        delete_desc.size = -1;
        delete_desc.file_size = -1;

        if (!delete_file.__isset.field_ids) [[unlikely]] {
            return Status::InternalError(
                    "missing delete field ids when reading equality delete file");
        }
        auto& read_column_field_ids = delete_file.field_ids;
        // Working set: ids still unmatched; also record them globally so the data
        // file reader knows to materialize these columns.
        std::set<int> read_column_field_ids_set;
        for (const auto& field_id : read_column_field_ids) {
            read_column_field_ids_set.insert(field_id);
            _equality_delete_col_ids.insert(field_id);
        }

        auto delete_reader = OrcReader::create_unique(_profile, _state, _params, delete_desc,
                                                      READ_DELETE_FILE_BATCH_SIZE,
                                                      _state->timezone(), _io_ctx, _meta_cache);
        RETURN_IF_ERROR(delete_reader->init_schema_reader());
        // delete file schema
        std::vector<std::string> delete_file_col_names;
        std::vector<DataTypePtr> delete_file_col_types;
        RETURN_IF_ERROR(
                delete_reader->get_parsed_schema(&delete_file_col_names, &delete_file_col_types));

        // the column that to read equality delete file.
        // (delete file maybe have extra columns that don't need to read)
        std::vector<std::string> delete_col_names;
        std::vector<DataTypePtr> delete_col_types;
        std::vector<int> delete_col_ids;
        std::unordered_map<std::string, uint32_t> delete_col_name_to_block_idx;

        const orc::Type* delete_field_desc = nullptr;
        RETURN_IF_ERROR(delete_reader->get_file_type(&delete_field_desc));
        DCHECK(delete_field_desc != nullptr);

        // Name-mapping node for the delete file (lower-cased name -> file name).
        auto eq_file_node = std::make_shared<TableSchemaChangeHelper::StructNode>();

        for (size_t idx = 0; idx < delete_field_desc->getSubtypeCount(); idx++) {
            auto delete_file_field = delete_field_desc->getSubtype(idx);

            if (!delete_file_field->hasAttributeKey(ICEBERG_ORC_ATTRIBUTE))
                    [[unlikely]] { // missing delete_file_field id
                // equality delete file must have delete_file_field id to match column.
                return Status::DataQualityError(
                        "missing delete_file_field id when reading equality delete file");
            } else {
                auto delete_field_id =
                        std::stoi(delete_file_field->getAttributeValue(ICEBERG_ORC_ATTRIBUTE));
                if (read_column_field_ids_set.contains(delete_field_id)) {
                    // the column that need to read.
                    if (is_complex_type(delete_file_col_types[idx]->get_primitive_type()))
                            [[unlikely]] {
                        return Status::InternalError(
                                "can not support read complex column in equality delete file.");
                    } else if (!data_file_id_to_field_idx.contains(delete_field_id)) [[unlikely]] {
                        return Status::DataQualityError(
                                "can not find delete field id in data file schema when reading "
                                "equality delete file");
                    }

                    auto data_file_field = _data_file_type_desc->getSubtype(
                            data_file_id_to_field_idx[delete_field_id]);

                    // The delete key's ORC kind must match the data file's kind exactly
                    // (type promotion across schema evolution is not supported here).
                    if (delete_file_field->getKind() != data_file_field->getKind()) [[unlikely]] {
                        return Status::NotSupported(
                                "Not Support type change in equality delete, field: {}, delete "
                                "file type: {}, data file type: {}",
                                delete_field_id, delete_file_field->getKind(),
                                data_file_field->getKind());
                    }
                    std::string filed_lower_name = to_lower(delete_field_desc->getFieldName(idx));
                    eq_file_node->add_children(
                            filed_lower_name, delete_field_desc->getFieldName(idx),
                            std::make_shared<TableSchemaChangeHelper::ScalarNode>());

                    delete_col_ids.emplace_back(delete_field_id);
                    delete_col_names.emplace_back(filed_lower_name);
                    // Nullable so block merging is uniform regardless of file nullability.
                    delete_col_types.emplace_back(make_nullable(delete_file_col_types[idx]));
                    // Matched: remove from the working set.
                    read_column_field_ids_set.erase(delete_field_id);
                }
            }
        }
        // Any id left unmatched means the delete file is inconsistent with FE metadata.
        if (!read_column_field_ids_set.empty()) [[unlikely]] {
            return Status::DataQualityError("some field ids not found in equality delete file.");
        }

        for (uint32_t idx = 0; idx < delete_col_names.size(); ++idx) {
            delete_col_name_to_block_idx[delete_col_names[idx]] = idx;
        }

        RETURN_IF_ERROR(delete_reader->init_reader(&delete_col_names, &delete_col_name_to_block_idx,
                                                   {}, false, nullptr, nullptr, nullptr, nullptr,
                                                   eq_file_node));
        RETURN_IF_ERROR(delete_reader->set_fill_columns(partition_columns, missing_columns));

        // Delete files sharing the same key-id vector accumulate into one block.
        if (!_equality_delete_block_map.contains(delete_col_ids)) {
            _equality_delete_block_map.emplace(delete_col_ids, _equality_delete_blocks.size());
            Block block;
            _generate_equality_delete_block(&block, delete_col_names, delete_col_types);
            _equality_delete_blocks.emplace_back(block);
        }
        Block& eq_file_block = _equality_delete_blocks[_equality_delete_block_map[delete_col_ids]];
        bool eof = false;
        // Drain the delete file batch by batch, merging rows into the shared block.
        while (!eof) {
            Block tmp_block;
            _generate_equality_delete_block(&tmp_block, delete_col_names, delete_col_types);
            size_t read_rows = 0;
            RETURN_IF_ERROR(delete_reader->get_next_block(&tmp_block, &read_rows, &eof));
            if (read_rows > 0) {
                MutableBlock mutable_block(&eq_file_block);
                RETURN_IF_ERROR(mutable_block.merge(tmp_block));
            }
        }
    }

    // One delete implementation per distinct key-id set.
    for (const auto& [delete_col_ids, block_idx] : _equality_delete_block_map) {
        auto& eq_file_block = _equality_delete_blocks[block_idx];
        auto equality_delete_impl =
                EqualityDeleteBase::get_delete_impl(&eq_file_block, delete_col_ids);
        RETURN_IF_ERROR(equality_delete_impl->init(_profile));
        _equality_delete_impls.emplace_back(std::move(equality_delete_impl));
    }
    return Status::OK();
}
1206
#include "common/compile_check_end.h"
1207
} // namespace doris