Coverage Report

Created: 2025-07-27 01:30

/root/doris/be/src/olap/push_handler.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "olap/push_handler.h"
19
20
#include <fmt/core.h>
21
#include <gen_cpp/AgentService_types.h>
22
#include <gen_cpp/Descriptors_types.h>
23
#include <gen_cpp/MasterService_types.h>
24
#include <gen_cpp/PaloInternalService_types.h>
25
#include <gen_cpp/PlanNodes_types.h>
26
#include <gen_cpp/Types_types.h>
27
#include <gen_cpp/olap_file.pb.h>
28
#include <gen_cpp/types.pb.h>
29
#include <glog/logging.h>
30
31
#include <algorithm>
32
#include <iostream>
33
#include <mutex>
34
#include <new>
35
#include <queue>
36
#include <shared_mutex>
37
#include <type_traits>
38
39
#include "common/compiler_util.h" // IWYU pragma: keep
40
#include "common/config.h"
41
#include "common/logging.h"
42
#include "common/status.h"
43
#include "io/hdfs_builder.h"
44
#include "olap/cumulative_compaction_time_series_policy.h"
45
#include "olap/delete_handler.h"
46
#include "olap/olap_define.h"
47
#include "olap/rowset/pending_rowset_helper.h"
48
#include "olap/rowset/rowset_writer.h"
49
#include "olap/rowset/rowset_writer_context.h"
50
#include "olap/schema.h"
51
#include "olap/storage_engine.h"
52
#include "olap/tablet.h"
53
#include "olap/tablet_manager.h"
54
#include "olap/tablet_schema.h"
55
#include "olap/txn_manager.h"
56
#include "runtime/descriptors.h"
57
#include "runtime/exec_env.h"
58
#include "util/time.h"
59
#include "vec/core/block.h"
60
#include "vec/core/column_with_type_and_name.h"
61
#include "vec/data_types/data_type_factory.hpp"
62
#include "vec/data_types/data_type_nullable.h"
63
#include "vec/exec/format/parquet/vparquet_reader.h"
64
#include "vec/exprs/vexpr_context.h"
65
#include "vec/functions/simple_function_factory.h"
66
67
namespace doris {
68
using namespace ErrorCode;
69
70
// Process push command; the main logic is as follows:
71
//    a. related tablets do not exist:
72
//        the current tablet isn't in a schema change state; only push for the current
73
//        tablet
74
//    b. related tablets exist
75
//       I.  the current tablet is the old tablet (cur.creation_time <
76
//       related.creation_time):
77
//           push for the current tablet and then convert data for related tablets
78
//       II. the current tablet is the new tablet:
79
//           this usually means the schema change is over;
80
//           clear schema change info in both the current tablet and related
81
//           tablets; finally we only push for the current tablet. This is
82
//           very useful for rollup actions.
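To make the case analysis above concrete, here is a minimal, self-contained sketch of the decision rule the comment describes. It is not part of push_handler.cpp; TabletStub, PushPlan, and plan_push are hypothetical names used only for illustration.

#include <cstdint>
#include <iostream>
#include <optional>

struct TabletStub {           // hypothetical stand-in for a tablet
    int64_t creation_time;
};

enum class PushPlan { CurrentOnly, CurrentThenConvert, ClearAlterStateThenCurrent };

// Decide how a push should be applied given an optional related (alter) tablet.
PushPlan plan_push(const TabletStub& cur, const std::optional<TabletStub>& related) {
    if (!related) {
        return PushPlan::CurrentOnly;            // case a: no related tablet
    }
    if (cur.creation_time < related->creation_time) {
        return PushPlan::CurrentThenConvert;     // case b.I: cur is the older tablet
    }
    return PushPlan::ClearAlterStateThenCurrent; // case b.II: schema change already finished
}

int main() {
    TabletStub cur {100};
    std::cout << static_cast<int>(plan_push(cur, TabletStub {200})) << '\n'; // prints 1
    std::cout << static_cast<int>(plan_push(cur, std::nullopt)) << '\n';     // prints 0
}

The real handler additionally manages transactions and reports tablet info; the sketch only captures the branch on creation_time.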
83
Status PushHandler::process_streaming_ingestion(TabletSharedPtr tablet, const TPushReq& request,
84
                                                PushType push_type,
85
0
                                                std::vector<TTabletInfo>* tablet_info_vec) {
86
0
    LOG(INFO) << "begin to realtime push. tablet=" << tablet->tablet_id()
87
0
              << ", transaction_id=" << request.transaction_id;
88
89
0
    Status res = Status::OK();
90
0
    _request = request;
91
92
0
    RETURN_IF_ERROR(DescriptorTbl::create(&_pool, _request.desc_tbl, &_desc_tbl));
93
94
0
    res = _do_streaming_ingestion(tablet, request, push_type, tablet_info_vec);
95
96
0
    if (res.ok()) {
  Branch (96:9): [True: 0, False: 0]
97
0
        if (tablet_info_vec != nullptr) {
  Branch (97:13): [True: 0, False: 0]
98
0
            TTabletInfo tablet_info;
99
0
            tablet_info.tablet_id = tablet->tablet_id();
100
0
            tablet_info.schema_hash = tablet->schema_hash();
101
0
            RETURN_IF_ERROR(_engine.tablet_manager()->report_tablet_info(&tablet_info));
102
0
            tablet_info_vec->push_back(tablet_info);
103
0
        }
104
0
        LOG(INFO) << "process realtime push successfully. "
105
0
                  << "tablet=" << tablet->tablet_id() << ", partition_id=" << request.partition_id
106
0
                  << ", transaction_id=" << request.transaction_id;
107
0
    }
108
109
0
    return res;
110
0
}
111
112
Status PushHandler::_do_streaming_ingestion(TabletSharedPtr tablet, const TPushReq& request,
113
                                            PushType push_type,
114
0
                                            std::vector<TTabletInfo>* tablet_info_vec) {
115
    // add the transaction in the engine, then check the schema change status
116
    // take the lock to prevent the schema change handler from checking the transaction concurrently
117
0
    if (tablet == nullptr) {
  Branch (117:9): [True: 0, False: 0]
118
0
        return Status::Error<TABLE_NOT_FOUND>(
119
0
                "PushHandler::_do_streaming_ingestion input tablet is nullptr");
120
0
    }
121
122
0
    std::shared_lock base_migration_rlock(tablet->get_migration_lock(), std::try_to_lock);
123
0
    DBUG_EXECUTE_IF("PushHandler::_do_streaming_ingestion.try_lock_fail", {
Line
Count
Source
37
0
    if (UNLIKELY(config::enable_debug_points)) {                              \
38
0
        auto dp = DebugPoints::instance()->get_debug_point(debug_point_name); \
39
0
        if (dp) {                                                             \
  Branch (39:13): [True: 0, False: 0]
40
0
            [[maybe_unused]] auto DP_NAME = debug_point_name;                 \
41
0
            { code; }                                                         \
42
0
        }                                                                     \
43
0
    }
124
0
        return Status::Error<TRY_LOCK_FAILED>(
125
0
                "PushHandler::_do_streaming_ingestion get lock failed");
126
0
    })
127
0
    if (!base_migration_rlock.owns_lock()) {
  Branch (127:9): [True: 0, False: 0]
128
0
        return Status::Error<TRY_LOCK_FAILED>(
129
0
                "PushHandler::_do_streaming_ingestion get lock failed");
130
0
    }
131
0
    PUniqueId load_id;
132
0
    load_id.set_hi(0);
133
0
    load_id.set_lo(0);
134
0
    {
135
0
        std::lock_guard<std::mutex> push_lock(tablet->get_push_lock());
136
0
        RETURN_IF_ERROR(_engine.txn_manager()->prepare_txn(request.partition_id, *tablet,
137
0
                                                           request.transaction_id, load_id));
138
0
    }
139
140
    // do not call validate request here, because realtime load does not
141
    // contain version info
142
143
0
    Status res;
144
    // check delete condition if push for delete
145
0
    std::queue<DeletePredicatePB> del_preds;
146
0
    if (push_type == PushType::PUSH_FOR_DELETE) {
  Branch (146:9): [True: 0, False: 0]
147
0
        DeletePredicatePB del_pred;
148
0
        TabletSchema tablet_schema;
149
0
        tablet_schema.copy_from(*tablet->tablet_schema());
150
0
        if (!request.columns_desc.empty() && request.columns_desc[0].col_unique_id >= 0) {
  Branch (150:13): [True: 0, False: 0]
  Branch (150:46): [True: 0, False: 0]
151
0
            tablet_schema.clear_columns();
152
0
            for (const auto& column_desc : request.columns_desc) {
  Branch (152:42): [True: 0, False: 0]
153
0
                tablet_schema.append_column(TabletColumn(column_desc));
154
0
            }
155
0
        }
156
0
        res = DeleteHandler::generate_delete_predicate(tablet_schema, request.delete_conditions,
157
0
                                                       &del_pred);
158
0
        del_preds.push(del_pred);
159
0
        if (!res.ok()) {
  Branch (159:13): [True: 0, False: 0]
160
0
            LOG(WARNING) << "fail to generate delete condition. res=" << res
161
0
                         << ", tablet=" << tablet->tablet_id();
162
0
            return res;
163
0
        }
164
0
    }
165
166
0
    int32_t max_version_config = tablet->max_version_config();
167
    // check if the version number exceeds the limit
168
0
    if (tablet->exceed_version_limit(max_version_config)) {
  Branch (168:9): [True: 0, False: 0]
169
0
        return Status::Error<TOO_MANY_VERSION>(
170
0
                "failed to push data. version count: {}, exceed limit: {}, tablet: {}. Please "
171
0
                "reduce the frequency of loading data or adjust the max_tablet_version_num or "
172
0
                "time_series_max_tablet_version_num in "
173
0
                "be.conf to a larger value.",
174
0
                tablet->version_count(), max_version_config, tablet->tablet_id());
175
0
    }
176
177
0
    int version_count = tablet->version_count() + tablet->stale_version_count();
178
0
    if (tablet->avg_rs_meta_serialize_size() * version_count >
  Branch (178:9): [True: 0, False: 0]
179
0
        config::tablet_meta_serialize_size_limit) {
180
0
        return Status::Error<TOO_MANY_VERSION>(
181
0
                "failed to init rowset builder. meta serialize size : {}, exceed limit: {}, "
182
0
                "tablet: {}. Please reduce the frequency of loading data or adjust the "
183
0
                "max_tablet_version_num in be.conf to a larger value.",
184
0
                tablet->avg_rs_meta_serialize_size() * version_count,
185
0
                config::tablet_meta_serialize_size_limit, tablet->tablet_id());
186
0
    }
187
188
0
    auto tablet_schema = std::make_shared<TabletSchema>();
189
0
    tablet_schema->copy_from(*tablet->tablet_schema());
190
0
    if (!request.columns_desc.empty() && request.columns_desc[0].col_unique_id >= 0) {
  Branch (190:9): [True: 0, False: 0]
  Branch (190:42): [True: 0, False: 0]
191
0
        tablet_schema->clear_columns();
192
        // TODO(lhy) handle variant
193
0
        for (const auto& column_desc : request.columns_desc) {
  Branch (193:38): [True: 0, False: 0]
194
0
            tablet_schema->append_column(TabletColumn(column_desc));
195
0
        }
196
0
    }
197
0
    RowsetSharedPtr rowset_to_add;
198
    // writes
199
0
    res = _convert_v2(tablet, &rowset_to_add, tablet_schema, push_type);
200
0
    if (!res.ok()) {
  Branch (200:9): [True: 0, False: 0]
201
0
        LOG(WARNING) << "fail to convert tmp file when realtime push. res=" << res
202
0
                     << ", failed to process realtime push."
203
0
                     << ", tablet=" << tablet->tablet_id()
204
0
                     << ", transaction_id=" << request.transaction_id;
205
206
0
        Status rollback_status = _engine.txn_manager()->rollback_txn(request.partition_id, *tablet,
207
0
                                                                     request.transaction_id);
208
        // have to check the rollback status to ensure we don't delete a committed rowset
209
0
        if (rollback_status.ok()) {
  Branch (209:13): [True: 0, False: 0]
210
0
            _engine.add_unused_rowset(rowset_to_add);
211
0
        }
212
0
        return res;
213
0
    }
214
215
    // add pending data to tablet
216
217
0
    if (push_type == PushType::PUSH_FOR_DELETE) {
  Branch (217:9): [True: 0, False: 0]
218
0
        rowset_to_add->rowset_meta()->set_delete_predicate(std::move(del_preds.front()));
219
0
        del_preds.pop();
220
0
    }
221
    // Transfer ownership of `PendingRowsetGuard` to `TxnManager`
222
0
    Status commit_status = _engine.txn_manager()->commit_txn(
223
0
            request.partition_id, *tablet, request.transaction_id, load_id, rowset_to_add,
224
0
            std::move(_pending_rs_guard), false);
225
0
    if (!commit_status.ok() && !commit_status.is<PUSH_TRANSACTION_ALREADY_EXIST>()) {
  Branch (225:9): [True: 0, False: 0]
  Branch (225:32): [True: 0, False: 0]
226
0
        res = std::move(commit_status);
227
0
    }
228
0
    return res;
229
0
}
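The commit path above hands _pending_rs_guard to the TxnManager via std::move so that exactly one owner ends up releasing the pending rowset. A minimal sketch of that move-based RAII hand-off, using hypothetical PendingGuard and TxnManagerStub types rather than the real PendingRowsetGuard/TxnManager:

#include <iostream>
#include <utility>

// Hypothetical guard: registers a pending rowset id on construction and releases it
// on destruction, unless ownership has been moved away (a moved-from guard does nothing).
class PendingGuard {
public:
    explicit PendingGuard(int rowset_id) : _rowset_id(rowset_id) {
        std::cout << "add pending rowset " << _rowset_id << '\n';
    }
    PendingGuard(PendingGuard&& other) noexcept : _rowset_id(other._rowset_id) {
        other._rowset_id = -1;
    }
    PendingGuard& operator=(PendingGuard&&) = delete;
    ~PendingGuard() {
        if (_rowset_id != -1) {
            std::cout << "remove pending rowset " << _rowset_id << '\n';
        }
    }

private:
    int _rowset_id;
};

struct TxnManagerStub {
    // Takes the guard by value: the rowset stays pending while this owner holds the
    // guard and is released when the owner drops it.
    void commit_txn(PendingGuard guard) { std::cout << "txn manager now owns the guard\n"; }
};

int main() {
    PendingGuard guard(42);
    TxnManagerStub txn_mgr;
    txn_mgr.commit_txn(std::move(guard)); // after this, the local guard is inert
}

Passing the guard by value (and moving into it) is what lets the transaction side, not the push handler, decide when the pending rowset id is released.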
230
231
Status PushHandler::_convert_v2(TabletSharedPtr cur_tablet, RowsetSharedPtr* cur_rowset,
232
0
                                TabletSchemaSPtr tablet_schema, PushType push_type) {
233
0
    Status res = Status::OK();
234
0
    uint32_t num_rows = 0;
235
0
    PUniqueId load_id;
236
0
    load_id.set_hi(0);
237
0
    load_id.set_lo(0);
238
239
0
    do {
240
0
        VLOG_NOTICE << "start to convert delta file.";
241
242
        // 1. init RowsetBuilder of cur_tablet for current push
243
0
        VLOG_NOTICE << "init rowset builder. tablet=" << cur_tablet->tablet_id()
244
0
                    << ", block_row_size=" << tablet_schema->num_rows_per_row_block();
245
        // Although the spark load output files are fully sorted,
246
        // this depends on the third-party implementation, so we conservatively
247
        // set this value to OVERLAP_UNKNOWN
248
0
        RowsetWriterContext context;
249
0
        context.txn_id = _request.transaction_id;
250
0
        context.load_id = load_id;
251
0
        context.rowset_state = PREPARED;
252
0
        context.segments_overlap = OVERLAP_UNKNOWN;
253
0
        context.tablet_schema = tablet_schema;
254
0
        context.newest_write_timestamp = UnixSeconds();
255
0
        auto rowset_writer = DORIS_TRY(cur_tablet->create_rowset_writer(context, false));
256
0
        _pending_rs_guard = _engine.pending_local_rowsets().add(context.rowset_id);
257
258
        // 2. Init PushBrokerReader to read the broker file if it exists;
259
        //    in the case of an empty push this is skipped.
260
0
        std::string path;
261
        // If this is a push delete, the broker_scan_range is not set.
262
0
        if (push_type == PushType::PUSH_NORMAL_V2) {
  Branch (262:13): [True: 0, False: 0]
263
0
            path = _request.broker_scan_range.ranges[0].path;
264
0
            LOG(INFO) << "tablet=" << cur_tablet->tablet_id() << ", file path=" << path
265
0
                      << ", file size=" << _request.broker_scan_range.ranges[0].file_size;
266
0
        }
267
        // For push load, this tablet may not need to push data, so the path may be empty
268
0
        if (!path.empty()) {
  Branch (268:13): [True: 0, False: 0]
269
            // init schema
270
0
            std::unique_ptr<Schema> schema(new (std::nothrow) Schema(tablet_schema));
271
0
            if (schema == nullptr) {
  Branch (271:17): [True: 0, False: 0]
272
0
                res = Status::Error<MEM_ALLOC_FAILED>("fail to create schema. tablet={}",
273
0
                                                      cur_tablet->tablet_id());
274
0
                break;
275
0
            }
276
277
            // init Reader
278
0
            std::unique_ptr<PushBrokerReader> reader = PushBrokerReader::create_unique(
279
0
                    schema.get(), _request.broker_scan_range, _request.desc_tbl);
280
0
            res = reader->init();
281
0
            if (reader == nullptr || !res.ok()) {
  Branch (281:17): [True: 0, False: 0]
  Branch (281:38): [True: 0, False: 0]
282
0
                res = Status::Error<PUSH_INIT_ERROR>("fail to init reader. res={}, tablet={}", res,
283
0
                                                     cur_tablet->tablet_id());
284
0
                break;
285
0
            }
286
287
            // 3. Init Block
288
0
            vectorized::Block block;
289
290
            // 4. Read data from broker and write into cur_tablet
291
0
            VLOG_NOTICE << "start to convert etl file to delta.";
292
0
            while (!reader->eof()) {
  Branch (292:20): [True: 0, False: 0]
293
0
                res = reader->next(&block);
294
0
                if (!res.ok()) {
  Branch (294:21): [True: 0, False: 0]
295
0
                    LOG(WARNING) << "read next row failed."
296
0
                                 << " res=" << res << " read_rows=" << num_rows;
297
0
                    break;
298
0
                } else {
299
0
                    if (reader->eof()) {
  Branch (299:25): [True: 0, False: 0]
300
0
                        break;
301
0
                    }
302
0
                    if (!(res = rowset_writer->add_block(&block)).ok()) {
  Branch (302:25): [True: 0, False: 0]
303
0
                        LOG(WARNING) << "fail to attach block to rowset_writer. "
304
0
                                     << "res=" << res << ", tablet=" << cur_tablet->tablet_id()
305
0
                                     << ", read_rows=" << num_rows;
306
0
                        break;
307
0
                    }
308
0
                    num_rows++;
309
0
                }
310
0
            }
311
312
0
            reader->print_profile();
313
0
            RETURN_IF_ERROR(reader->close());
314
0
        }
315
316
0
        if (!res.ok()) {
  Branch (316:13): [True: 0, False: 0]
317
0
            break;
318
0
        }
319
320
0
        if (!(res = rowset_writer->flush()).ok()) {
  Branch (320:13): [True: 0, False: 0]
321
0
            LOG(WARNING) << "failed to finalize writer";
322
0
            break;
323
0
        }
324
325
0
        if (!(res = rowset_writer->build(*cur_rowset)).ok()) {
  Branch (325:13): [True: 0, False: 0]
326
0
            LOG(WARNING) << "failed to build rowset";
327
0
            break;
328
0
        }
329
330
0
        _write_bytes += (*cur_rowset)->data_disk_size();
331
0
        _write_rows += (*cur_rowset)->num_rows();
332
0
    } while (false);
  Branch (332:14): [Folded - Ignored]
333
334
0
    VLOG_TRACE << "convert delta file end. res=" << res << ", tablet=" << cur_tablet->tablet_id()
335
0
               << ", processed_rows=" << num_rows;
336
0
    return res;
337
0
}
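The numbered steps in _convert_v2 above boil down to a read-until-eof, append, flush, build loop. A minimal, self-contained sketch of that pattern with hypothetical ReaderStub/WriterStub types (not the real PushBrokerReader or RowsetWriter APIs):

#include <iostream>
#include <string>
#include <vector>

// Hypothetical reader: yields blocks (here just strings) until exhausted.
struct ReaderStub {
    std::vector<std::string> blocks {"block-1", "block-2"};
    size_t next_idx = 0;
    bool eof() const { return next_idx >= blocks.size(); }
    std::string next() { return blocks[next_idx++]; }
};

// Hypothetical writer: accumulates blocks; flush and build finalize the result.
struct WriterStub {
    size_t rows = 0;
    void add_block(const std::string& block) {
        std::cout << "append " << block << '\n';
        ++rows;
    }
    void flush() { std::cout << "flush segments\n"; }
    void build() { std::cout << "build rowset from " << rows << " blocks\n"; }
};

int main() {
    ReaderStub reader;
    WriterStub writer;
    while (!reader.eof()) {   // read until the source is exhausted
        writer.add_block(reader.next());
    }
    writer.flush();           // finalize segment data
    writer.build();           // produce the rowset
}

In the listing above the same shape appears with rowset_writer->add_block, flush, and build, with error handling after each step.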
338
339
PushBrokerReader::PushBrokerReader(const Schema* schema, const TBrokerScanRange& t_scan_range,
340
                                   const TDescriptorTable& t_desc_tbl)
341
        : _ready(false),
342
          _eof(false),
343
          _next_range(0),
344
          _t_desc_tbl(t_desc_tbl),
345
          _cur_reader_eof(false),
346
          _params(t_scan_range.params),
347
0
          _ranges(t_scan_range.ranges) {
348
    // change broker params to file params
349
0
    if (_ranges.empty()) {
  Branch (349:9): [True: 0, False: 0]
350
0
        return;
351
0
    }
352
0
    _file_params.format_type = _ranges[0].format_type;
353
0
    _file_params.src_tuple_id = _params.src_tuple_id;
354
0
    _file_params.dest_tuple_id = _params.dest_tuple_id;
355
0
    _file_params.num_of_columns_from_file = _ranges[0].num_of_columns_from_file;
356
0
    _file_params.properties = _params.properties;
357
0
    _file_params.expr_of_dest_slot = _params.expr_of_dest_slot;
358
0
    _file_params.dest_sid_to_src_sid_without_trans = _params.dest_sid_to_src_sid_without_trans;
359
0
    _file_params.strict_mode = _params.strict_mode;
360
0
    if (_ranges[0].file_type == TFileType::FILE_HDFS) {
  Branch (360:9): [True: 0, False: 0]
361
0
        _file_params.hdfs_params = parse_properties(_params.properties);
362
0
    } else {
363
0
        _file_params.__isset.broker_addresses = true;
364
0
        _file_params.broker_addresses = t_scan_range.broker_addresses;
365
0
    }
366
367
0
    for (const auto& range : _ranges) {
  Branch (367:28): [True: 0, False: 0]
368
0
        TFileRangeDesc file_range;
369
        // TODO(cmy): in the previous implementation, the file_type was set in _file_params
370
        // and it used _ranges[0].file_type.
371
        // Later, this field was moved to TFileRangeDesc, but here we still only use _ranges[0]'s
372
        // file_type.
373
        // Because I don't know whether other ranges have this field, just keep it the same as before.
374
0
        file_range.__set_file_type(_ranges[0].file_type);
375
0
        file_range.__set_load_id(range.load_id);
376
0
        file_range.__set_path(range.path);
377
0
        file_range.__set_start_offset(range.start_offset);
378
0
        file_range.__set_size(range.size);
379
0
        file_range.__set_file_size(range.file_size);
380
0
        file_range.__set_columns_from_path(range.columns_from_path);
381
382
0
        _file_ranges.push_back(file_range);
383
0
    }
384
0
}
385
386
0
Status PushBrokerReader::init() {
387
    // init runtime state, runtime profile, counter
388
0
    TUniqueId dummy_id;
389
0
    dummy_id.hi = 0;
390
0
    dummy_id.lo = 0;
391
0
    TPlanFragmentExecParams params;
392
0
    params.fragment_instance_id = dummy_id;
393
0
    params.query_id = dummy_id;
394
0
    TExecPlanFragmentParams fragment_params;
395
0
    fragment_params.params = params;
396
0
    fragment_params.protocol_version = PaloInternalServiceVersion::V1;
397
0
    TQueryOptions query_options;
398
0
    TQueryGlobals query_globals;
399
0
    std::shared_ptr<MemTrackerLimiter> tracker = MemTrackerLimiter::create_shared(
400
0
            MemTrackerLimiter::Type::LOAD,
401
0
            fmt::format("PushBrokerReader:dummy_id={}", print_id(dummy_id)));
402
0
    _runtime_state = RuntimeState::create_unique(params, query_options, query_globals,
403
0
                                                 ExecEnv::GetInstance(), nullptr, tracker);
404
0
    DescriptorTbl* desc_tbl = nullptr;
405
0
    Status status = DescriptorTbl::create(_runtime_state->obj_pool(), _t_desc_tbl, &desc_tbl);
406
0
    if (UNLIKELY(!status.ok())) {
407
0
        return Status::Error<PUSH_INIT_ERROR>("Failed to create descriptor table, msg: {}", status);
408
0
    }
409
0
    _runtime_state->set_desc_tbl(desc_tbl);
410
0
    _runtime_profile = _runtime_state->runtime_profile();
411
0
    _runtime_profile->set_name("PushBrokerReader");
412
413
0
    _file_cache_statistics.reset(new io::FileCacheStatistics());
414
0
    _io_ctx.reset(new io::IOContext());
415
0
    _io_ctx->file_cache_stats = _file_cache_statistics.get();
416
0
    _io_ctx->query_id = &_runtime_state->query_id();
417
418
0
    auto slot_descs = desc_tbl->get_tuple_descriptor(0)->slots();
419
0
    for (auto& slot_desc : slot_descs) {
  Branch (419:26): [True: 0, False: 0]
420
0
        _all_col_names.push_back(to_lower((slot_desc->col_name())));
421
0
    }
422
423
0
    RETURN_IF_ERROR(_init_expr_ctxes());
424
425
0
    _ready = true;
426
0
    return Status::OK();
427
0
}
428
429
0
Status PushBrokerReader::next(vectorized::Block* block) {
430
0
    if (!_ready || block == nullptr) {
  Branch (430:9): [True: 0, False: 0]
  Branch (430:20): [True: 0, False: 0]
431
0
        return Status::Error<INVALID_ARGUMENT>("PushBrokerReader not ready or block is nullptr");
432
0
    }
433
0
    if (_cur_reader == nullptr || _cur_reader_eof) {
  Branch (433:9): [True: 0, False: 0]
  Branch (433:35): [True: 0, False: 0]
434
0
        RETURN_IF_ERROR(_get_next_reader());
435
0
        if (_eof) {
  Branch (435:13): [True: 0, False: 0]
436
0
            return Status::OK();
437
0
        }
438
0
    }
439
0
    RETURN_IF_ERROR(_init_src_block());
440
0
    size_t read_rows = 0;
441
0
    RETURN_IF_ERROR(_cur_reader->get_next_block(_src_block_ptr, &read_rows, &_cur_reader_eof));
442
0
    if (read_rows > 0) {
  Branch (442:9): [True: 0, False: 0]
443
0
        RETURN_IF_ERROR(_cast_to_input_block());
444
0
        RETURN_IF_ERROR(_convert_to_output_block(block));
445
0
    }
446
0
    return Status::OK();
447
0
}
448
449
0
Status PushBrokerReader::close() {
450
0
    _ready = false;
451
0
    return Status::OK();
452
0
}
453
454
0
Status PushBrokerReader::_init_src_block() {
455
0
    _src_block.clear();
456
0
    int idx = 0;
457
0
    for (auto& slot : _src_slot_descs) {
  Branch (457:21): [True: 0, False: 0]
458
0
        vectorized::DataTypePtr data_type;
459
0
        auto it = _name_to_col_type.find(slot->col_name());
460
0
        if (it == _name_to_col_type.end()) {
  Branch (460:13): [True: 0, False: 0]
461
            // does not exist in the file; use the type from _input_tuple_desc
462
0
            data_type = vectorized::DataTypeFactory::instance().create_data_type(
463
0
                    slot->type(), slot->is_nullable());
464
0
        } else {
465
0
            data_type = vectorized::DataTypeFactory::instance().create_data_type(it->second, true);
466
0
        }
467
0
        if (data_type == nullptr) {
  Branch (467:13): [True: 0, False: 0]
468
0
            return Status::NotSupported("Not support data type {} for column {}",
469
0
                                        it == _name_to_col_type.end() ? slot->type().debug_string()
  Branch (469:41): [True: 0, False: 0]
470
0
                                                                      : it->second.debug_string(),
471
0
                                        slot->col_name());
472
0
        }
473
0
        vectorized::MutableColumnPtr data_column = data_type->create_column();
474
0
        _src_block.insert(vectorized::ColumnWithTypeAndName(std::move(data_column), data_type,
475
0
                                                            slot->col_name()));
476
0
        _src_block_name_to_idx.emplace(slot->col_name(), idx++);
477
0
    }
478
0
    _src_block_ptr = &_src_block;
479
0
    return Status::OK();
480
0
}
481
482
0
Status PushBrokerReader::_cast_to_input_block() {
483
0
    size_t idx = 0;
484
0
    for (auto& slot_desc : _src_slot_descs) {
  Branch (484:26): [True: 0, False: 0]
485
0
        if (_name_to_col_type.find(slot_desc->col_name()) == _name_to_col_type.end()) {
  Branch (485:13): [True: 0, False: 0]
486
0
            continue;
487
0
        }
488
0
        if (slot_desc->type().is_variant_type()) {
  Branch (488:13): [True: 0, False: 0]
489
0
            continue;
490
0
        }
491
0
        auto& arg = _src_block_ptr->get_by_name(slot_desc->col_name());
492
        // remove nullable here and let get_function decide whether the result is nullable
493
0
        auto return_type = slot_desc->get_data_type_ptr();
494
0
        idx = _src_block_name_to_idx[slot_desc->col_name()];
495
        // bitmap convert: src -> to_base64 -> bitmap_from_base64
496
0
        if (slot_desc->type().is_bitmap_type()) {
  Branch (496:13): [True: 0, False: 0]
497
0
            auto base64_return_type = vectorized::DataTypeFactory::instance().create_data_type(
498
0
                    vectorized::DataTypeString().get_type_as_type_descriptor(),
499
0
                    slot_desc->is_nullable());
500
0
            auto func_to_base64 = vectorized::SimpleFunctionFactory::instance().get_function(
501
0
                    "to_base64", {arg}, base64_return_type);
502
0
            RETURN_IF_ERROR(func_to_base64->execute(nullptr, *_src_block_ptr, {idx}, idx,
503
0
                                                    arg.column->size()));
504
0
            _src_block_ptr->get_by_position(idx).type = std::move(base64_return_type);
505
0
            auto& arg_base64 = _src_block_ptr->get_by_name(slot_desc->col_name());
506
0
            auto func_bitmap_from_base64 =
507
0
                    vectorized::SimpleFunctionFactory::instance().get_function(
508
0
                            "bitmap_from_base64", {arg_base64}, return_type);
509
0
            RETURN_IF_ERROR(func_bitmap_from_base64->execute(nullptr, *_src_block_ptr, {idx}, idx,
510
0
                                                             arg_base64.column->size()));
511
0
            _src_block_ptr->get_by_position(idx).type = std::move(return_type);
512
0
        } else {
513
0
            vectorized::ColumnsWithTypeAndName arguments {
514
0
                    arg,
515
0
                    {vectorized::DataTypeString().create_column_const(
516
0
                             arg.column->size(), remove_nullable(return_type)->get_family_name()),
517
0
                     std::make_shared<vectorized::DataTypeString>(), ""}};
518
0
            auto func_cast = vectorized::SimpleFunctionFactory::instance().get_function(
519
0
                    "CAST", arguments, return_type);
520
0
            RETURN_IF_ERROR(
521
0
                    func_cast->execute(nullptr, *_src_block_ptr, {idx}, idx, arg.column->size()));
522
0
            _src_block_ptr->get_by_position(idx).type = std::move(return_type);
523
0
        }
524
0
    }
525
0
    return Status::OK();
526
0
}
527
528
0
Status PushBrokerReader::_convert_to_output_block(vectorized::Block* block) {
529
0
    block->clear();
530
531
0
    int ctx_idx = 0;
532
0
    size_t rows = _src_block.rows();
533
0
    auto filter_column = vectorized::ColumnUInt8::create(rows, 1);
534
535
0
    for (auto slot_desc : _dest_tuple_desc->slots()) {
  Branch (535:25): [True: 0, False: 0]
536
0
        if (!slot_desc->is_materialized()) {
  Branch (536:13): [True: 0, False: 0]
537
0
            continue;
538
0
        }
539
0
        int dest_index = ctx_idx++;
540
0
        vectorized::ColumnPtr column_ptr;
541
542
0
        auto& ctx = _dest_expr_ctxs[dest_index];
543
0
        int result_column_id = -1;
544
        // PT1 => dest primitive type
545
0
        RETURN_IF_ERROR(ctx->execute(&_src_block, &result_column_id));
546
0
        column_ptr = _src_block.get_by_position(result_column_id).column;
547
        // column_ptr may be a ColumnConst; convert it to a normal column
548
0
        column_ptr = column_ptr->convert_to_full_column_if_const();
549
0
        DCHECK(column_ptr != nullptr);
550
551
        // because src_slot_desc is always nullable, the column_ptr produced by dest_expr
552
        // is likely to be nullable
553
0
        if (LIKELY(column_ptr->is_nullable())) {
554
0
            if (!slot_desc->is_nullable()) {
  Branch (554:17): [True: 0, False: 0]
555
0
                column_ptr = remove_nullable(column_ptr);
556
0
            }
557
0
        } else if (slot_desc->is_nullable()) {
  Branch (557:20): [True: 0, False: 0]
558
0
            column_ptr = make_nullable(column_ptr);
559
0
        }
560
0
        block->insert(dest_index,
561
0
                      vectorized::ColumnWithTypeAndName(column_ptr, slot_desc->get_data_type_ptr(),
562
0
                                                        slot_desc->col_name()));
563
0
    }
564
0
    _src_block.clear();
565
566
0
    size_t dest_size = block->columns();
567
0
    block->insert(vectorized::ColumnWithTypeAndName(std::move(filter_column),
568
0
                                                    std::make_shared<vectorized::DataTypeUInt8>(),
569
0
                                                    "filter column"));
570
0
    RETURN_IF_ERROR(vectorized::Block::filter_block(block, dest_size, dest_size));
571
0
    return Status::OK();
572
0
}
573
574
0
void PushBrokerReader::print_profile() {
575
0
    std::stringstream ss;
576
0
    _runtime_profile->pretty_print(&ss);
577
0
    LOG(INFO) << ss.str();
578
0
}
579
580
0
Status PushBrokerReader::_init_expr_ctxes() {
581
    // Construct _src_slot_descs
582
0
    const TupleDescriptor* src_tuple_desc =
583
0
            _runtime_state->desc_tbl().get_tuple_descriptor(_params.src_tuple_id);
584
0
    if (src_tuple_desc == nullptr) {
  Branch (584:9): [True: 0, False: 0]
585
0
        return Status::InternalError("Unknown source tuple descriptor, tuple_id={}",
586
0
                                     _params.src_tuple_id);
587
0
    }
588
589
0
    std::map<SlotId, SlotDescriptor*> src_slot_desc_map;
590
0
    std::unordered_map<SlotDescriptor*, int> src_slot_desc_to_index {};
591
0
    for (int i = 0, len = src_tuple_desc->slots().size(); i < len; ++i) {
  Branch (591:59): [True: 0, False: 0]
592
0
        auto* slot_desc = src_tuple_desc->slots()[i];
593
0
        src_slot_desc_to_index.emplace(slot_desc, i);
594
0
        src_slot_desc_map.emplace(slot_desc->id(), slot_desc);
595
0
    }
596
0
    for (auto slot_id : _params.src_slot_ids) {
  Branch (596:23): [True: 0, False: 0]
597
0
        auto it = src_slot_desc_map.find(slot_id);
598
0
        if (it == std::end(src_slot_desc_map)) {
  Branch (598:13): [True: 0, False: 0]
599
0
            return Status::InternalError("Unknown source slot descriptor, slot_id={}", slot_id);
600
0
        }
601
0
        _src_slot_descs.emplace_back(it->second);
602
0
    }
603
0
    _row_desc.reset(new RowDescriptor(_runtime_state->desc_tbl(),
604
0
                                      std::vector<TupleId>({_params.src_tuple_id}),
605
0
                                      std::vector<bool>({false})));
606
607
0
    if (!_pre_filter_texprs.empty()) {
  Branch (607:9): [True: 0, False: 0]
608
0
        DCHECK(_pre_filter_texprs.size() == 1);
609
0
        RETURN_IF_ERROR(
610
0
                vectorized::VExpr::create_expr_tree(_pre_filter_texprs[0], _pre_filter_ctx_ptr));
611
0
        RETURN_IF_ERROR(_pre_filter_ctx_ptr->prepare(_runtime_state.get(), *_row_desc));
612
0
        RETURN_IF_ERROR(_pre_filter_ctx_ptr->open(_runtime_state.get()));
613
0
    }
614
615
0
    _dest_tuple_desc = _runtime_state->desc_tbl().get_tuple_descriptor(_params.dest_tuple_id);
616
0
    if (_dest_tuple_desc == nullptr) {
  Branch (616:9): [True: 0, False: 0]
617
0
        return Status::InternalError("Unknown dest tuple descriptor, tuple_id={}",
618
0
                                     _params.dest_tuple_id);
619
0
    }
620
0
    bool has_slot_id_map = _params.__isset.dest_sid_to_src_sid_without_trans;
621
0
    for (auto slot_desc : _dest_tuple_desc->slots()) {
  Branch (621:25): [True: 0, False: 0]
622
0
        if (!slot_desc->is_materialized()) {
  Branch (622:13): [True: 0, False: 0]
623
0
            continue;
624
0
        }
625
0
        auto it = _params.expr_of_dest_slot.find(slot_desc->id());
626
0
        if (it == std::end(_params.expr_of_dest_slot)) {
  Branch (626:13): [True: 0, False: 0]
627
0
            return Status::InternalError("No expr for dest slot, id={}, name={}", slot_desc->id(),
628
0
                                         slot_desc->col_name());
629
0
        }
630
631
0
        vectorized::VExprContextSPtr ctx;
632
0
        RETURN_IF_ERROR(vectorized::VExpr::create_expr_tree(it->second, ctx));
633
0
        RETURN_IF_ERROR(ctx->prepare(_runtime_state.get(), *_row_desc.get()));
634
0
        RETURN_IF_ERROR(ctx->open(_runtime_state.get()));
635
0
        _dest_expr_ctxs.emplace_back(ctx);
636
0
        if (has_slot_id_map) {
  Branch (636:13): [True: 0, False: 0]
637
0
            auto it1 = _params.dest_sid_to_src_sid_without_trans.find(slot_desc->id());
638
0
            if (it1 == std::end(_params.dest_sid_to_src_sid_without_trans)) {
  Branch (638:17): [True: 0, False: 0]
639
0
                _src_slot_descs_order_by_dest.emplace_back(nullptr);
640
0
            } else {
641
0
                auto _src_slot_it = src_slot_desc_map.find(it1->second);
642
0
                if (_src_slot_it == std::end(src_slot_desc_map)) {
  Branch (642:21): [True: 0, False: 0]
643
0
                    return Status::InternalError("No src slot {} in src slot descs", it1->second);
644
0
                }
645
0
                _dest_slot_to_src_slot_index.emplace(_src_slot_descs_order_by_dest.size(),
646
0
                                                     src_slot_desc_to_index[_src_slot_it->second]);
647
0
                _src_slot_descs_order_by_dest.emplace_back(_src_slot_it->second);
648
0
            }
649
0
        }
650
0
    }
651
0
    return Status::OK();
652
0
}
653
654
0
Status PushBrokerReader::_get_next_reader() {
655
0
    _cur_reader.reset(nullptr);
656
0
    if (_next_range >= _file_ranges.size()) {
  Branch (656:9): [True: 0, False: 0]
657
0
        _eof = true;
658
0
        return Status::OK();
659
0
    }
660
0
    const TFileRangeDesc& range = _file_ranges[_next_range++];
661
0
    Status init_status;
662
0
    switch (_file_params.format_type) {
663
0
    case TFileFormatType::FORMAT_PARQUET: {
  Branch (663:5): [True: 0, False: 0]
664
0
        std::unique_ptr<vectorized::ParquetReader> parquet_reader =
665
0
                vectorized::ParquetReader::create_unique(
666
0
                        _runtime_profile, _file_params, range,
667
0
                        _runtime_state->query_options().batch_size,
668
0
                        const_cast<cctz::time_zone*>(&_runtime_state->timezone_obj()),
669
0
                        _io_ctx.get(), _runtime_state.get());
670
671
0
        init_status = parquet_reader->init_reader(
672
0
                _all_col_names, _colname_to_value_range, _push_down_exprs, _real_tuple_desc,
673
0
                _default_val_row_desc.get(), _col_name_to_slot_id,
674
0
                &_not_single_slot_filter_conjuncts, &_slot_id_to_filter_conjuncts,
675
0
                vectorized::TableSchemaChangeHelper::ConstNode::get_instance(), false);
676
0
        _cur_reader = std::move(parquet_reader);
677
0
        if (!init_status.ok()) {
  Branch (677:13): [True: 0, False: 0]
678
0
            return Status::InternalError("failed to init reader for file {}, err: {}", range.path,
679
0
                                         init_status.to_string());
680
0
        }
681
0
        std::unordered_map<std::string, std::tuple<std::string, const SlotDescriptor*>>
682
0
                partition_columns;
683
0
        std::unordered_map<std::string, vectorized::VExprContextSPtr> missing_columns;
684
0
        RETURN_IF_ERROR(_cur_reader->get_columns(&_name_to_col_type, &_missing_cols));
685
0
        RETURN_IF_ERROR(_cur_reader->set_fill_columns(partition_columns, missing_columns));
686
0
        break;
687
0
    }
688
0
    default:
  Branch (688:5): [True: 0, False: 0]
689
0
        return Status::Error<PUSH_INIT_ERROR>("Unsupported file format type: {}",
690
0
                                              _file_params.format_type);
691
0
    }
692
0
    _cur_reader_eof = false;
693
694
0
    return Status::OK();
695
0
}
696
697
} // namespace doris