Coverage Report

Created: 2026-03-20 21:33

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/storage/segment/segment_writer.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "storage/segment/segment_writer.h"
19
20
#include <assert.h>
21
#include <gen_cpp/segment_v2.pb.h>
22
#include <parallel_hashmap/phmap.h>
23
24
#include <algorithm>
25
26
// IWYU pragma: no_include <opentelemetry/common/threadlocal.h>
27
#include <crc32c/crc32c.h>
28
29
#include "cloud/config.h"
30
#include "common/cast_set.h"
31
#include "common/compiler_util.h" // IWYU pragma: keep
32
#include "common/config.h"
33
#include "common/logging.h" // LOG
34
#include "common/status.h"
35
#include "core/block/block.h"
36
#include "core/block/column_with_type_and_name.h"
37
#include "core/column/column_nullable.h"
38
#include "core/data_type/primitive_type.h"
39
#include "core/field.h"
40
#include "core/types.h"
41
#include "core/value/vdatetime_value.h"
42
#include "exec/common/variant_util.h"
43
#include "io/cache/block_file_cache.h"
44
#include "io/cache/block_file_cache_factory.h"
45
#include "io/fs/file_system.h"
46
#include "io/fs/file_writer.h"
47
#include "io/fs/local_file_system.h"
48
#include "runtime/exec_env.h"
49
#include "runtime/memory/mem_tracker.h"
50
#include "service/point_query_executor.h"
51
#include "storage/data_dir.h"
52
#include "storage/index/index_file_writer.h"
53
#include "storage/index/index_writer.h"
54
#include "storage/index/inverted/inverted_index_fs_directory.h"
55
#include "storage/index/primary_key_index.h"
56
#include "storage/index/short_key_index.h"
57
#include "storage/iterator/olap_data_convertor.h"
58
#include "storage/key_coder.h"
59
#include "storage/olap_common.h"
60
#include "storage/olap_define.h"
61
#include "storage/partial_update_info.h"
62
#include "storage/rowset/rowset_writer_context.h" // RowsetWriterContext
63
#include "storage/rowset/segment_creator.h"
64
#include "storage/segment/column_writer.h" // ColumnWriter
65
#include "storage/segment/external_col_meta_util.h"
66
#include "storage/segment/page_io.h"
67
#include "storage/segment/page_pointer.h"
68
#include "storage/segment/segment_loader.h"
69
#include "storage/segment/variant/variant_ext_meta_writer.h"
70
#include "storage/segment/variant_stats_calculator.h"
71
#include "storage/storage_engine.h"
72
#include "storage/tablet/tablet_schema.h"
73
#include "storage/utils.h"
74
#include "util/coding.h"
75
#include "util/faststring.h"
76
#include "util/jsonb/serialize.h"
77
#include "util/simd/bits.h"
78
namespace doris {
79
namespace segment_v2 {
80
#include "common/compile_check_begin.h"
81
82
using namespace ErrorCode;
83
using namespace KeyConsts;
84
85
const char* k_segment_magic = "D0R1";
86
const uint32_t k_segment_magic_length = 4;
87
88
5.26k
inline std::string segment_mem_tracker_name(uint32_t segment_id) {
89
5.26k
    return "SegmentWriter:Segment-" + std::to_string(segment_id);
90
5.26k
}
91
92
SegmentWriter::SegmentWriter(io::FileWriter* file_writer, uint32_t segment_id,
93
                             TabletSchemaSPtr tablet_schema, BaseTabletSPtr tablet,
94
                             DataDir* data_dir, const SegmentWriterOptions& opts,
95
                             IndexFileWriter* index_file_writer)
96
5.26k
        : _segment_id(segment_id),
97
5.26k
          _tablet_schema(std::move(tablet_schema)),
98
5.26k
          _tablet(std::move(tablet)),
99
5.26k
          _data_dir(data_dir),
100
5.26k
          _opts(opts),
101
5.26k
          _file_writer(file_writer),
102
5.26k
          _index_file_writer(index_file_writer),
103
5.26k
          _mem_tracker(std::make_unique<MemTracker>(segment_mem_tracker_name(segment_id))),
104
5.26k
          _mow_context(std::move(opts.mow_ctx)) {
105
5.26k
    CHECK_NOTNULL(file_writer);
106
5.26k
    _num_sort_key_columns = _tablet_schema->num_key_columns();
107
5.26k
    _num_short_key_columns = _tablet_schema->num_short_key_columns();
108
5.26k
    if (!_is_mow_with_cluster_key()) {
109
5.26k
        DCHECK(_num_sort_key_columns >= _num_short_key_columns)
110
0
                << ", table_id=" << _tablet_schema->table_id()
111
0
                << ", num_key_columns=" << _num_sort_key_columns
112
0
                << ", num_short_key_columns=" << _num_short_key_columns
113
0
                << ", cluster_key_columns=" << _tablet_schema->cluster_key_uids().size();
114
5.26k
    }
115
8.88k
    for (size_t cid = 0; cid < _num_sort_key_columns; ++cid) {
116
3.62k
        const auto& column = _tablet_schema->column(cid);
117
3.62k
        _key_coders.push_back(get_key_coder(column.type()));
118
3.62k
        _key_index_size.push_back(cast_set<uint16_t>(column.index_length()));
119
3.62k
    }
120
5.26k
    if (_is_mow()) {
121
        // encode the sequence id into the primary key index
122
69
        if (_tablet_schema->has_sequence_col()) {
123
26
            const auto& column = _tablet_schema->column(_tablet_schema->sequence_col_idx());
124
26
            _seq_coder = get_key_coder(column.type());
125
26
        }
126
        // encode the rowid into the primary key index
127
69
        if (_is_mow_with_cluster_key()) {
128
0
            const auto* type_info = get_scalar_type_info<FieldType::OLAP_FIELD_TYPE_UNSIGNED_INT>();
129
0
            _rowid_coder = get_key_coder(type_info->type());
130
            // primary keys
131
0
            _primary_key_coders.swap(_key_coders);
132
            // cluster keys
133
0
            _key_coders.clear();
134
0
            _key_index_size.clear();
135
0
            _num_sort_key_columns = _tablet_schema->cluster_key_uids().size();
136
0
            for (auto cid : _tablet_schema->cluster_key_uids()) {
137
0
                const auto& column = _tablet_schema->column_by_uid(cid);
138
0
                _key_coders.push_back(get_key_coder(column.type()));
139
0
                _key_index_size.push_back(cast_set<uint16_t>(column.index_length()));
140
0
            }
141
0
        }
142
69
    }
143
5.26k
}
144
145
5.26k
SegmentWriter::~SegmentWriter() {
146
5.26k
    _mem_tracker->release(_mem_tracker->consumption());
147
5.26k
}
148
149
void SegmentWriter::init_column_meta(ColumnMetaPB* meta, uint32_t column_id,
150
15.9k
                                     const TabletColumn& column, TabletSchemaSPtr tablet_schema) {
151
15.9k
    meta->set_column_id(column_id);
152
15.9k
    meta->set_type(int(column.type()));
153
15.9k
    meta->set_length(column.length());
154
15.9k
    meta->set_encoding(DEFAULT_ENCODING);
155
15.9k
    meta->set_compression(_opts.compression_type);
156
15.9k
    meta->set_is_nullable(column.is_nullable());
157
15.9k
    meta->set_default_value(column.default_value());
158
15.9k
    meta->set_precision(column.precision());
159
15.9k
    meta->set_frac(column.frac());
160
15.9k
    if (column.has_path_info()) {
161
298
        column.path_info_ptr()->to_protobuf(meta->mutable_column_path_info(),
162
298
                                            column.parent_unique_id());
163
298
    }
164
15.9k
    meta->set_unique_id(column.unique_id());
165
15.9k
    for (uint32_t i = 0; i < column.get_subtype_count(); ++i) {
166
13
        init_column_meta(meta->add_children_columns(), column_id, column.get_sub_column(i),
167
13
                         tablet_schema);
168
13
    }
169
15.9k
    meta->set_result_is_nullable(column.get_result_is_nullable());
170
15.9k
    meta->set_function_name(column.get_aggregation_name());
171
15.9k
    meta->set_be_exec_version(column.get_be_exec_version());
172
15.9k
    if (column.is_variant_type()) {
173
298
        meta->set_variant_max_subcolumns_count(column.variant_max_subcolumns_count());
174
298
    }
175
15.9k
}
176
177
1.25k
Status SegmentWriter::init() {
178
1.25k
    std::vector<uint32_t> column_ids;
179
1.25k
    auto column_cnt = cast_set<int>(_tablet_schema->num_columns());
180
8.10k
    for (uint32_t i = 0; i < column_cnt; ++i) {
181
6.85k
        column_ids.emplace_back(i);
182
6.85k
    }
183
1.25k
    return init(column_ids, true);
184
1.25k
}
185
186
Status SegmentWriter::_create_column_writer(uint32_t cid, const TabletColumn& column,
187
15.9k
                                            const TabletSchemaSPtr& schema) {
188
15.9k
    ColumnWriterOptions opts;
189
15.9k
    opts.meta = _footer.add_columns();
190
191
15.9k
    init_column_meta(opts.meta, cid, column, schema);
192
193
    // now we create zone map for key columns in AGG_KEYS or all column in UNIQUE_KEYS or DUP_KEYS
194
    // except for columns whose type don't support zone map.
195
15.9k
    opts.need_zone_map = column.is_key() || schema->keys_type() != KeysType::AGG_KEYS;
196
15.9k
    opts.need_bloom_filter = column.is_bf_column();
197
15.9k
    if (opts.need_bloom_filter) {
198
4
        opts.bf_options.fpp = schema->has_bf_fpp() ? schema->bloom_filter_fpp() : 0.05;
199
4
    }
200
15.9k
    auto* tablet_index = schema->get_ngram_bf_index(column.unique_id());
201
15.9k
    if (tablet_index) {
202
0
        opts.need_bloom_filter = true;
203
0
        opts.is_ngram_bf_index = true;
204
        //narrow convert from int32_t to uint8_t and uint16_t which is dangerous
205
0
        auto gram_size = tablet_index->get_gram_size();
206
0
        auto gram_bf_size = tablet_index->get_gram_bf_size();
207
0
        if (gram_size > 256 || gram_size < 1) {
208
0
            return Status::NotSupported("Do not support ngram bloom filter for ngram_size: ",
209
0
                                        gram_size);
210
0
        }
211
0
        if (gram_bf_size > 65535 || gram_bf_size < 64) {
212
0
            return Status::NotSupported("Do not support ngram bloom filter for bf_size: ",
213
0
                                        gram_bf_size);
214
0
        }
215
0
        opts.gram_size = cast_set<uint8_t>(gram_size);
216
0
        opts.gram_bf_size = cast_set<uint16_t>(gram_bf_size);
217
0
    }
218
219
15.9k
    bool skip_inverted_index = false;
220
15.9k
    if (_opts.rowset_ctx != nullptr) {
221
        // skip write inverted index for index compaction column
222
13.7k
        skip_inverted_index =
223
13.7k
                _opts.rowset_ctx->columns_to_do_index_compaction.count(column.unique_id()) > 0;
224
13.7k
    }
225
    // skip write inverted index on load if skip_write_index_on_load is true
226
15.9k
    if (_opts.write_type == DataWriteType::TYPE_DIRECT && schema->skip_write_index_on_load()) {
227
0
        skip_inverted_index = true;
228
0
    }
229
    // indexes for this column
230
15.9k
    if (!skip_inverted_index) {
231
15.5k
        auto inverted_indexs = schema->inverted_indexs(column);
232
15.5k
        if (!inverted_indexs.empty()) {
233
2.18k
            opts.inverted_indexes = inverted_indexs;
234
2.18k
            opts.need_inverted_index = true;
235
2.18k
            DCHECK(_index_file_writer != nullptr);
236
2.18k
        }
237
15.5k
    }
238
    // indexes for this column
239
15.9k
    if (const auto& index = schema->ann_index(column); index != nullptr) {
240
1
        opts.ann_index = index;
241
1
        opts.need_ann_index = true;
242
1
        DCHECK(_index_file_writer != nullptr);
243
1
    }
244
245
15.9k
    opts.index_file_writer = _index_file_writer;
246
247
15.9k
#define DISABLE_INDEX_IF_FIELD_TYPE(TYPE)                     \
248
143k
    if (column.type() == FieldType::OLAP_FIELD_TYPE_##TYPE) { \
249
301
        opts.need_zone_map = false;                           \
250
301
        opts.need_bloom_filter = false;                       \
251
301
    }
252
253
15.9k
    DISABLE_INDEX_IF_FIELD_TYPE(STRUCT)
254
15.9k
    DISABLE_INDEX_IF_FIELD_TYPE(ARRAY)
255
15.9k
    DISABLE_INDEX_IF_FIELD_TYPE(JSONB)
256
15.9k
    DISABLE_INDEX_IF_FIELD_TYPE(AGG_STATE)
257
15.9k
    DISABLE_INDEX_IF_FIELD_TYPE(MAP)
258
15.9k
    DISABLE_INDEX_IF_FIELD_TYPE(BITMAP)
259
15.9k
    DISABLE_INDEX_IF_FIELD_TYPE(HLL)
260
15.9k
    DISABLE_INDEX_IF_FIELD_TYPE(QUANTILE_STATE)
261
15.9k
    DISABLE_INDEX_IF_FIELD_TYPE(VARIANT)
262
263
15.9k
#undef DISABLE_INDEX_IF_FIELD_TYPE
264
265
15.9k
    int64_t storage_page_size = _tablet_schema->storage_page_size();
266
    // storage_page_size must be between 4KB and 10MB.
267
15.9k
    if (storage_page_size >= 4096 && storage_page_size <= 10485760) {
268
15.9k
        opts.data_page_size = storage_page_size;
269
15.9k
    }
270
15.9k
    opts.dict_page_size = _tablet_schema->storage_dict_page_size();
271
15.9k
    DBUG_EXECUTE_IF("VerticalSegmentWriter._create_column_writer.storage_page_size", {
272
15.9k
        auto table_id = DebugPoints::instance()->get_debug_param_or_default<int64_t>(
273
15.9k
                "VerticalSegmentWriter._create_column_writer.storage_page_size", "table_id",
274
15.9k
                INT_MIN);
275
15.9k
        auto target_data_page_size = DebugPoints::instance()->get_debug_param_or_default<int64_t>(
276
15.9k
                "VerticalSegmentWriter._create_column_writer.storage_page_size",
277
15.9k
                "storage_page_size", INT_MIN);
278
15.9k
        if (table_id == INT_MIN || target_data_page_size == INT_MIN) {
279
15.9k
            return Status::Error<ErrorCode::INTERNAL_ERROR>(
280
15.9k
                    "Debug point parameters missing: either 'table_id' or 'storage_page_size' not "
281
15.9k
                    "set.");
282
15.9k
        }
283
15.9k
        if (table_id == _tablet_schema->table_id() &&
284
15.9k
            opts.data_page_size != target_data_page_size) {
285
15.9k
            return Status::Error<ErrorCode::INTERNAL_ERROR>(
286
15.9k
                    "Mismatch in 'storage_page_size': expected size does not match the current "
287
15.9k
                    "data page size. "
288
15.9k
                    "Expected: " +
289
15.9k
                    std::to_string(target_data_page_size) +
290
15.9k
                    ", Actual: " + std::to_string(opts.data_page_size) + ".");
291
15.9k
        }
292
15.9k
    })
293
15.9k
    if (column.is_row_store_column()) {
294
        // smaller page size for row store column
295
0
        auto page_size = _tablet_schema->row_store_page_size();
296
0
        opts.data_page_size =
297
0
                (page_size > 0) ? page_size : segment_v2::ROW_STORE_PAGE_SIZE_DEFAULT_VALUE;
298
0
    }
299
300
15.9k
    opts.rowset_ctx = _opts.rowset_ctx;
301
15.9k
    opts.file_writer = _file_writer;
302
15.9k
    opts.compression_type = _opts.compression_type;
303
15.9k
    opts.footer = &_footer;
304
15.9k
    if (_opts.rowset_ctx != nullptr) {
305
13.7k
        opts.input_rs_readers = _opts.rowset_ctx->input_rs_readers;
306
13.7k
    }
307
15.9k
    opts.encoding_preference = {.integer_type_default_use_plain_encoding =
308
15.9k
                                        _tablet_schema->integer_type_default_use_plain_encoding(),
309
15.9k
                                .binary_plain_encoding_default_impl =
310
15.9k
                                        _tablet_schema->binary_plain_encoding_default_impl()};
311
312
15.9k
    std::unique_ptr<ColumnWriter> writer;
313
15.9k
    RETURN_IF_ERROR(ColumnWriter::create(opts, &column, _file_writer, &writer));
314
15.9k
    RETURN_IF_ERROR(writer->init());
315
15.9k
    _column_writers.push_back(std::move(writer));
316
317
15.9k
    _olap_data_convertor->add_column_data_convertor(column);
318
15.9k
    return Status::OK();
319
15.9k
}
320
321
7.75k
Status SegmentWriter::init(const std::vector<uint32_t>& col_ids, bool has_key) {
322
7.75k
    DCHECK(_column_writers.empty());
323
7.75k
    DCHECK(_column_ids.empty());
324
7.75k
    _has_key = has_key;
325
7.75k
    _column_writers.reserve(_tablet_schema->columns().size());
326
7.75k
    _column_ids.insert(_column_ids.end(), col_ids.begin(), col_ids.end());
327
7.75k
    _olap_data_convertor = std::make_unique<OlapBlockDataConvertor>();
328
7.75k
    if (_opts.compression_type == UNKNOWN_COMPRESSION) {
329
5.26k
        _opts.compression_type = _tablet_schema->compression_type();
330
5.26k
    }
331
332
7.75k
    RETURN_IF_ERROR(_create_writers(_tablet_schema, col_ids));
333
334
    // Initialize variant statistics calculator
335
7.75k
    _variant_stats_calculator =
336
7.75k
            std::make_unique<VariantStatsCaculator>(&_footer, _tablet_schema, col_ids);
337
338
    // we don't need the short key index for unique key merge on write table.
339
7.75k
    if (_has_key) {
340
5.26k
        if (_is_mow()) {
341
69
            size_t seq_col_length = 0;
342
69
            if (_tablet_schema->has_sequence_col()) {
343
26
                seq_col_length =
344
26
                        _tablet_schema->column(_tablet_schema->sequence_col_idx()).length() + 1;
345
26
            }
346
69
            size_t rowid_length = 0;
347
69
            if (_is_mow_with_cluster_key()) {
348
0
                rowid_length = PrimaryKeyIndexReader::ROW_ID_LENGTH;
349
0
                _short_key_index_builder.reset(
350
0
                        new ShortKeyIndexBuilder(_segment_id, _opts.num_rows_per_block));
351
0
            }
352
69
            _primary_key_index_builder.reset(
353
69
                    new PrimaryKeyIndexBuilder(_file_writer, seq_col_length, rowid_length));
354
69
            RETURN_IF_ERROR(_primary_key_index_builder->init());
355
5.19k
        } else {
356
5.19k
            _short_key_index_builder.reset(
357
5.19k
                    new ShortKeyIndexBuilder(_segment_id, _opts.num_rows_per_block));
358
5.19k
        }
359
5.26k
    }
360
7.75k
    return Status::OK();
361
7.75k
}
362
363
Status SegmentWriter::_create_writers(const TabletSchemaSPtr& tablet_schema,
364
7.75k
                                      const std::vector<uint32_t>& col_ids) {
365
7.75k
    _olap_data_convertor->reserve(col_ids.size());
366
15.9k
    for (auto& cid : col_ids) {
367
15.9k
        RETURN_IF_ERROR(_create_column_writer(cid, tablet_schema->column(cid), tablet_schema));
368
15.9k
    }
369
7.75k
    return Status::OK();
370
7.75k
}
371
372
800
void SegmentWriter::_maybe_invalid_row_cache(const std::string& key) {
373
    // Just invalid row cache for simplicity, since the rowset is not visible at present.
374
    // If we update/insert cache, if load failed rowset will not be visible but cached data
375
    // will be visible, and lead to inconsistency.
376
800
    if (!config::disable_storage_row_cache && _tablet_schema->has_row_store_for_all_columns() &&
377
800
        _opts.write_type == DataWriteType::TYPE_DIRECT) {
378
        // invalidate cache
379
0
        RowCache::instance()->erase({_opts.rowset_ctx->tablet_id, key});
380
0
    }
381
800
}
382
383
4
void SegmentWriter::_serialize_block_to_row_column(const Block& block) {
384
4
    if (block.rows() == 0) {
385
0
        return;
386
0
    }
387
4
    MonotonicStopWatch watch;
388
4
    watch.start();
389
4
    int row_column_id = 0;
390
8
    for (int i = 0; i < _tablet_schema->num_columns(); ++i) {
391
4
        if (_tablet_schema->column(i).is_row_store_column()) {
392
0
            auto* row_store_column = static_cast<ColumnString*>(
393
0
                    block.get_by_position(i).column->assume_mutable_ref().assume_mutable().get());
394
0
            row_store_column->clear();
395
0
            DataTypeSerDeSPtrs serdes = create_data_type_serdes(block.get_data_types());
396
0
            JsonbSerializeUtil::block_to_jsonb(*_tablet_schema, block, *row_store_column,
397
0
                                               cast_set<int>(_tablet_schema->num_columns()), serdes,
398
0
                                               {_tablet_schema->row_columns_uids().begin(),
399
0
                                                _tablet_schema->row_columns_uids().end()});
400
0
            break;
401
0
        }
402
4
    }
403
404
4
    VLOG_DEBUG << "serialize , num_rows:" << block.rows() << ", row_column_id:" << row_column_id
405
0
               << ", total_byte_size:" << block.allocated_bytes() << ", serialize_cost(us)"
406
0
               << watch.elapsed_time() / 1000;
407
4
}
408
409
Status SegmentWriter::probe_key_for_mow(
410
        std::string key, std::size_t segment_pos, bool have_input_seq_column, bool have_delete_sign,
411
        const std::vector<RowsetSharedPtr>& specified_rowsets,
412
        std::vector<std::unique_ptr<SegmentCacheHandle>>& segment_caches,
413
        bool& has_default_or_nullable, std::vector<bool>& use_default_or_null_flag,
414
        const std::function<void(const RowLocation& loc)>& found_cb,
415
0
        const std::function<Status()>& not_found_cb, PartialUpdateStats& stats) {
416
0
    RowLocation loc;
417
    // save rowset shared ptr so this rowset wouldn't delete
418
0
    RowsetSharedPtr rowset;
419
0
    auto st = _tablet->lookup_row_key(
420
0
            key, _tablet_schema.get(), have_input_seq_column, specified_rowsets, &loc,
421
0
            cast_set<uint32_t>(_mow_context->max_version), segment_caches, &rowset);
422
0
    if (st.is<KEY_NOT_FOUND>()) {
423
0
        if (!have_delete_sign) {
424
0
            RETURN_IF_ERROR(not_found_cb());
425
0
        }
426
0
        ++stats.num_rows_new_added;
427
0
        has_default_or_nullable = true;
428
0
        use_default_or_null_flag.emplace_back(true);
429
0
        return Status::OK();
430
0
    }
431
0
    if (!st.ok() && !st.is<KEY_ALREADY_EXISTS>()) {
432
0
        LOG(WARNING) << "failed to lookup row key, error: " << st;
433
0
        return st;
434
0
    }
435
436
    // 1. if the delete sign is marked, it means that the value columns of the row will not
437
    //    be read. So we don't need to read the missing values from the previous rows.
438
    // 2. the one exception is when there are sequence columns in the table, we need to read
439
    //    the sequence columns, otherwise it may cause the merge-on-read based compaction
440
    //    policy to produce incorrect results
441
    // TODO(bobhan1): only read seq col rather than all columns in this situation for
442
    // partial update and flexible partial update
443
444
    // TODO(bobhan1): handle sequence column here
445
0
    if (st.is<KEY_ALREADY_EXISTS>() || (have_delete_sign && !_tablet_schema->has_sequence_col())) {
446
0
        has_default_or_nullable = true;
447
0
        use_default_or_null_flag.emplace_back(true);
448
0
    } else {
449
        // partial update should not contain invisible columns
450
0
        use_default_or_null_flag.emplace_back(false);
451
0
        _rsid_to_rowset.emplace(rowset->rowset_id(), rowset);
452
0
        found_cb(loc);
453
0
    }
454
455
0
    if (st.is<KEY_ALREADY_EXISTS>()) {
456
        // although we need to mark delete current row, we still need to read missing columns
457
        // for this row, we need to ensure that each column is aligned
458
0
        _mow_context->delete_bitmap->add(
459
0
                {_opts.rowset_ctx->rowset_id, _segment_id, DeleteBitmap::TEMP_VERSION_COMMON},
460
0
                cast_set<uint32_t>(segment_pos));
461
0
        ++stats.num_rows_deleted;
462
0
    } else {
463
0
        _mow_context->delete_bitmap->add(
464
0
                {loc.rowset_id, loc.segment_id, DeleteBitmap::TEMP_VERSION_COMMON}, loc.row_id);
465
0
        ++stats.num_rows_updated;
466
0
    }
467
0
    return Status::OK();
468
0
}
469
470
0
Status SegmentWriter::partial_update_preconditions_check(size_t row_pos) {
471
0
    if (!_is_mow()) {
472
0
        auto msg = fmt::format(
473
0
                "Can only do partial update on merge-on-write unique table, but found: "
474
0
                "keys_type={}, _opts.enable_unique_key_merge_on_write={}, tablet_id={}",
475
0
                _tablet_schema->keys_type(), _opts.enable_unique_key_merge_on_write,
476
0
                _tablet->tablet_id());
477
0
        DCHECK(false) << msg;
478
0
        return Status::InternalError<false>(msg);
479
0
    }
480
0
    if (_opts.rowset_ctx->partial_update_info == nullptr) {
481
0
        auto msg =
482
0
                fmt::format("partial_update_info should not be nullptr, please check, tablet_id={}",
483
0
                            _tablet->tablet_id());
484
0
        DCHECK(false) << msg;
485
0
        return Status::InternalError<false>(msg);
486
0
    }
487
0
    if (!_opts.rowset_ctx->partial_update_info->is_fixed_partial_update()) {
488
0
        auto msg = fmt::format(
489
0
                "in fixed partial update code, but update_mode={}, please check, tablet_id={}",
490
0
                _opts.rowset_ctx->partial_update_info->update_mode(), _tablet->tablet_id());
491
0
        DCHECK(false) << msg;
492
0
        return Status::InternalError<false>(msg);
493
0
    }
494
0
    if (row_pos != 0) {
495
0
        auto msg = fmt::format("row_pos should be 0, but found {}, tablet_id={}", row_pos,
496
0
                               _tablet->tablet_id());
497
0
        DCHECK(false) << msg;
498
0
        return Status::InternalError<false>(msg);
499
0
    }
500
0
    return Status::OK();
501
0
}
502
503
// for partial update, we should do following steps to fill content of block:
504
// 1. set block data to data convertor, and get all key_column's converted slice
505
// 2. get pk of input block, and read missing columns
506
//       2.1 first find key location{rowset_id, segment_id, row_id}
507
//       2.2 build read plan to read by batch
508
//       2.3 fill block
509
// 3. set columns to data convertor and then write all columns
510
Status SegmentWriter::append_block_with_partial_content(const Block* block, size_t row_pos,
511
0
                                                        size_t num_rows) {
512
0
    if (block->columns() < _tablet_schema->num_key_columns() ||
513
0
        block->columns() >= _tablet_schema->num_columns()) {
514
0
        return Status::InvalidArgument(
515
0
                fmt::format("illegal partial update block columns: {}, num key columns: {}, total "
516
0
                            "schema columns: {}",
517
0
                            block->columns(), _tablet_schema->num_key_columns(),
518
0
                            _tablet_schema->num_columns()));
519
0
    }
520
0
    RETURN_IF_ERROR(partial_update_preconditions_check(row_pos));
521
522
    // find missing column cids
523
0
    const auto& missing_cids = _opts.rowset_ctx->partial_update_info->missing_cids;
524
0
    const auto& including_cids = _opts.rowset_ctx->partial_update_info->update_cids;
525
526
    // create full block and fill with input columns
527
0
    auto full_block = _tablet_schema->create_block();
528
0
    size_t input_id = 0;
529
0
    for (auto i : including_cids) {
530
0
        full_block.replace_by_position(i, block->get_by_position(input_id++).column);
531
0
    }
532
533
0
    if (_opts.rowset_ctx->write_type != DataWriteType::TYPE_COMPACTION &&
534
0
        _tablet_schema->num_variant_columns() > 0) {
535
0
        RETURN_IF_ERROR(variant_util::parse_and_materialize_variant_columns(
536
0
                full_block, *_tablet_schema, including_cids));
537
0
    }
538
0
    RETURN_IF_ERROR(_olap_data_convertor->set_source_content_with_specifid_columns(
539
0
            &full_block, row_pos, num_rows, including_cids));
540
541
0
    bool have_input_seq_column = false;
542
    // write including columns
543
0
    std::vector<IOlapColumnDataAccessor*> key_columns;
544
0
    IOlapColumnDataAccessor* seq_column = nullptr;
545
0
    size_t segment_start_pos = 0;
546
0
    for (auto cid : including_cids) {
547
        // here we get segment column row num before append data.
548
0
        segment_start_pos = _column_writers[cid]->get_next_rowid();
549
        // olap data convertor alway start from id = 0
550
0
        auto converted_result = _olap_data_convertor->convert_column_data(cid);
551
0
        if (!converted_result.first.ok()) {
552
0
            return converted_result.first;
553
0
        }
554
0
        if (cid < _num_sort_key_columns) {
555
0
            key_columns.push_back(converted_result.second);
556
0
        } else if (_tablet_schema->has_sequence_col() &&
557
0
                   cid == _tablet_schema->sequence_col_idx()) {
558
0
            seq_column = converted_result.second;
559
0
            have_input_seq_column = true;
560
0
        }
561
0
        RETURN_IF_ERROR(_column_writers[cid]->append(converted_result.second->get_nullmap(),
562
0
                                                     converted_result.second->get_data(),
563
0
                                                     num_rows));
564
0
    }
565
566
0
    bool has_default_or_nullable = false;
567
0
    std::vector<bool> use_default_or_null_flag;
568
0
    use_default_or_null_flag.reserve(num_rows);
569
0
    const auto* delete_signs =
570
0
            BaseTablet::get_delete_sign_column_data(full_block, row_pos + num_rows);
571
572
0
    const std::vector<RowsetSharedPtr>& specified_rowsets = _mow_context->rowset_ptrs;
573
0
    std::vector<std::unique_ptr<SegmentCacheHandle>> segment_caches(specified_rowsets.size());
574
575
0
    FixedReadPlan read_plan;
576
577
    // locate rows in base data
578
0
    PartialUpdateStats stats;
579
580
0
    for (size_t block_pos = row_pos; block_pos < row_pos + num_rows; block_pos++) {
581
        // block   segment
582
        //   2   ->   0
583
        //   3   ->   1
584
        //   4   ->   2
585
        //   5   ->   3
586
        // here row_pos = 2, num_rows = 4.
587
0
        size_t delta_pos = block_pos - row_pos;
588
0
        size_t segment_pos = segment_start_pos + delta_pos;
589
0
        std::string key = _full_encode_keys(key_columns, delta_pos);
590
0
        _maybe_invalid_row_cache(key);
591
0
        if (have_input_seq_column) {
592
0
            _encode_seq_column(seq_column, delta_pos, &key);
593
0
        }
594
        // If the table have sequence column, and the include-cids don't contain the sequence
595
        // column, we need to update the primary key index builder at the end of this method.
596
        // At that time, we have a valid sequence column to encode the key with seq col.
597
0
        if (!_tablet_schema->has_sequence_col() || have_input_seq_column) {
598
0
            RETURN_IF_ERROR(_primary_key_index_builder->add_item(key));
599
0
        }
600
601
        // mark key with delete sign as deleted.
602
0
        bool have_delete_sign = (delete_signs != nullptr && delete_signs[block_pos] != 0);
603
604
0
        auto not_found_cb = [&]() {
605
0
            return _opts.rowset_ctx->partial_update_info->handle_new_key(
606
0
                    *_tablet_schema, [&]() -> std::string {
607
0
                        return block->dump_one_line(block_pos,
608
0
                                                    cast_set<int>(_num_sort_key_columns));
609
0
                    });
610
0
        };
611
0
        auto update_read_plan = [&](const RowLocation& loc) {
612
0
            read_plan.prepare_to_read(loc, segment_pos);
613
0
        };
614
0
        RETURN_IF_ERROR(probe_key_for_mow(std::move(key), segment_pos, have_input_seq_column,
615
0
                                          have_delete_sign, specified_rowsets, segment_caches,
616
0
                                          has_default_or_nullable, use_default_or_null_flag,
617
0
                                          update_read_plan, not_found_cb, stats));
618
0
    }
619
0
    CHECK_EQ(use_default_or_null_flag.size(), num_rows);
620
621
0
    if (config::enable_merge_on_write_correctness_check) {
622
0
        _tablet->add_sentinel_mark_to_delete_bitmap(_mow_context->delete_bitmap.get(),
623
0
                                                    *_mow_context->rowset_ids);
624
0
    }
625
626
    // read to fill full block
627
0
    RETURN_IF_ERROR(read_plan.fill_missing_columns(
628
0
            _opts.rowset_ctx, _rsid_to_rowset, *_tablet_schema, full_block,
629
0
            use_default_or_null_flag, has_default_or_nullable,
630
0
            cast_set<uint32_t>(segment_start_pos), block));
631
632
    // convert block to row store format
633
0
    _serialize_block_to_row_column(full_block);
634
635
    // convert missing columns and send to column writer
636
0
    RETURN_IF_ERROR(_olap_data_convertor->set_source_content_with_specifid_columns(
637
0
            &full_block, row_pos, num_rows, missing_cids));
638
0
    for (auto cid : missing_cids) {
639
0
        auto converted_result = _olap_data_convertor->convert_column_data(cid);
640
0
        if (!converted_result.first.ok()) {
641
0
            return converted_result.first;
642
0
        }
643
0
        if (_tablet_schema->has_sequence_col() && !have_input_seq_column &&
644
0
            cid == _tablet_schema->sequence_col_idx()) {
645
0
            DCHECK_EQ(seq_column, nullptr);
646
0
            seq_column = converted_result.second;
647
0
        }
648
0
        RETURN_IF_ERROR(_column_writers[cid]->append(converted_result.second->get_nullmap(),
649
0
                                                     converted_result.second->get_data(),
650
0
                                                     num_rows));
651
0
    }
652
0
    _num_rows_updated += stats.num_rows_updated;
653
0
    _num_rows_deleted += stats.num_rows_deleted;
654
0
    _num_rows_new_added += stats.num_rows_new_added;
655
0
    _num_rows_filtered += stats.num_rows_filtered;
656
0
    if (_tablet_schema->has_sequence_col() && !have_input_seq_column) {
657
0
        DCHECK_NE(seq_column, nullptr);
658
0
        if (_num_rows_written != row_pos ||
659
0
            _primary_key_index_builder->num_rows() != _num_rows_written) {
660
0
            return Status::InternalError(
661
0
                    "Correctness check failed, _num_rows_written: {}, row_pos: {}, primary key "
662
0
                    "index builder num rows: {}",
663
0
                    _num_rows_written, row_pos, _primary_key_index_builder->num_rows());
664
0
        }
665
0
        RETURN_IF_ERROR(
666
0
                _generate_primary_key_index(_key_coders, key_columns, seq_column, num_rows, false));
667
0
    }
668
669
0
    _num_rows_written += num_rows;
670
0
    DCHECK_EQ(_primary_key_index_builder->num_rows(), _num_rows_written)
671
0
            << "primary key index builder num rows(" << _primary_key_index_builder->num_rows()
672
0
            << ") not equal to segment writer's num rows written(" << _num_rows_written << ")";
673
0
    _olap_data_convertor->clear_source_content();
674
675
0
    return Status::OK();
676
0
}
677
678
9.56k
Status SegmentWriter::append_block(const Block* block, size_t row_pos, size_t num_rows) {
679
9.56k
    if (_opts.rowset_ctx->partial_update_info &&
680
9.56k
        _opts.rowset_ctx->partial_update_info->is_partial_update() &&
681
9.56k
        _opts.write_type == DataWriteType::TYPE_DIRECT &&
682
9.56k
        !_opts.rowset_ctx->is_transient_rowset_writer) {
683
0
        if (_opts.rowset_ctx->partial_update_info->is_fixed_partial_update()) {
684
0
            RETURN_IF_ERROR(append_block_with_partial_content(block, row_pos, num_rows));
685
0
        } else {
686
0
            return Status::NotSupported<false>(
687
0
                    "SegmentWriter doesn't support flexible partial update, please set "
688
0
                    "enable_vertical_segment_writer=true in be.conf on all BEs to use "
689
0
                    "VerticalSegmentWriter.");
690
0
        }
691
0
        return Status::OK();
692
0
    }
693
9.56k
    if (block->columns() < _column_writers.size()) {
694
0
        return Status::InternalError(
695
0
                "block->columns() < _column_writers.size(), block->columns()=" +
696
0
                std::to_string(block->columns()) +
697
0
                ", _column_writers.size()=" + std::to_string(_column_writers.size()) +
698
0
                ", _tablet_schema->dump_structure()=" + _tablet_schema->dump_structure());
699
0
    }
700
9.56k
    CHECK(block->columns() >= _column_writers.size())
701
0
            << ", block->columns()=" << block->columns()
702
0
            << ", _column_writers.size()=" << _column_writers.size()
703
0
            << ", _tablet_schema->dump_structure()=" << _tablet_schema->dump_structure();
704
    // Row column should be filled here when it's a directly write from memtable
705
    // or it's schema change write(since column data type maybe changed, so we should reubild)
706
9.56k
    if (_opts.write_type == DataWriteType::TYPE_DIRECT ||
707
9.56k
        _opts.write_type == DataWriteType::TYPE_SCHEMA_CHANGE) {
708
4
        _serialize_block_to_row_column(*block);
709
4
    }
710
711
9.56k
    if (_opts.rowset_ctx->write_type != DataWriteType::TYPE_COMPACTION &&
712
9.56k
        _tablet_schema->num_variant_columns() > 0) {
713
139
        RETURN_IF_ERROR(variant_util::parse_and_materialize_variant_columns(
714
139
                const_cast<Block&>(*block), *_tablet_schema, _column_ids));
715
139
    }
716
717
9.56k
    _olap_data_convertor->set_source_content(block, row_pos, num_rows);
718
719
    // find all row pos for short key indexes
720
9.56k
    std::vector<size_t> short_key_pos;
721
9.56k
    if (_has_key) {
722
        // We build a short key index every `_opts.num_rows_per_block` rows. Specifically, we
723
        // build a short key index using 1st rows for first block and `_short_key_row_pos - _row_count`
724
        // for next blocks.
725
        // Ensure we build a short key index using 1st rows only for the first block (ISSUE-9766).
726
6.39k
        if (UNLIKELY(_short_key_row_pos == 0 && _num_rows_written == 0)) {
727
5.19k
            short_key_pos.push_back(0);
728
5.19k
        }
729
14.3k
        while (_short_key_row_pos + _opts.num_rows_per_block < _num_rows_written + num_rows) {
730
7.96k
            _short_key_row_pos += _opts.num_rows_per_block;
731
7.96k
            short_key_pos.push_back(_short_key_row_pos - _num_rows_written);
732
7.96k
        }
733
6.39k
    }
734
735
    // convert column data from engine format to storage layer format
736
9.56k
    std::vector<IOlapColumnDataAccessor*> key_columns;
737
9.56k
    IOlapColumnDataAccessor* seq_column = nullptr;
738
26.5k
    for (size_t id = 0; id < _column_writers.size(); ++id) {
739
        // olap data convertor alway start from id = 0
740
16.9k
        auto converted_result = _olap_data_convertor->convert_column_data(id);
741
16.9k
        if (!converted_result.first.ok()) {
742
0
            return converted_result.first;
743
0
        }
744
16.9k
        auto cid = _column_ids[id];
745
16.9k
        if (_has_key && cid < _tablet_schema->num_key_columns()) {
746
4.74k
            key_columns.push_back(converted_result.second);
747
12.2k
        } else if (_has_key && _tablet_schema->has_sequence_col() &&
748
12.2k
                   cid == _tablet_schema->sequence_col_idx()) {
749
90
            seq_column = converted_result.second;
750
90
        }
751
16.9k
        RETURN_IF_ERROR(_column_writers[id]->append(converted_result.second->get_nullmap(),
752
16.9k
                                                    converted_result.second->get_data(), num_rows));
753
16.9k
    }
754
9.56k
    if (_opts.write_type == DataWriteType::TYPE_COMPACTION) {
755
7.84k
        RETURN_IF_ERROR(
756
7.84k
                _variant_stats_calculator->calculate_variant_stats(block, row_pos, num_rows));
757
7.84k
    }
758
9.56k
    if (_has_key) {
759
6.39k
        if (_is_mow_with_cluster_key()) {
760
            // for now we don't need to query short key index for CLUSTER BY feature,
761
            // but we still write the index for future usage.
762
            // 1. generate primary key index, the key_columns is primary_key_columns
763
0
            RETURN_IF_ERROR(_generate_primary_key_index(_primary_key_coders, key_columns,
764
0
                                                        seq_column, num_rows, true));
765
            // 2. generate short key index (use cluster key)
766
0
            key_columns.clear();
767
0
            for (const auto& cid : _tablet_schema->cluster_key_uids()) {
768
                // find cluster key index in tablet schema
769
0
                auto cluster_key_index = _tablet_schema->field_index(cid);
770
0
                if (cluster_key_index == -1) {
771
0
                    return Status::InternalError(
772
0
                            "could not find cluster key column with unique_id=" +
773
0
                            std::to_string(cid) + " in tablet schema");
774
0
                }
775
0
                bool found = false;
776
0
                for (auto i = 0; i < _column_ids.size(); ++i) {
777
0
                    if (_column_ids[i] == cluster_key_index) {
778
0
                        auto converted_result = _olap_data_convertor->convert_column_data(i);
779
0
                        if (!converted_result.first.ok()) {
780
0
                            return converted_result.first;
781
0
                        }
782
0
                        key_columns.push_back(converted_result.second);
783
0
                        found = true;
784
0
                        break;
785
0
                    }
786
0
                }
787
0
                if (!found) {
788
0
                    return Status::InternalError(
789
0
                            "could not found cluster key column with unique_id=" +
790
0
                            std::to_string(cid) +
791
0
                            ", tablet schema index=" + std::to_string(cluster_key_index));
792
0
                }
793
0
            }
794
0
            RETURN_IF_ERROR(_generate_short_key_index(key_columns, num_rows, short_key_pos));
795
6.39k
        } else if (_is_mow()) {
796
10
            RETURN_IF_ERROR(_generate_primary_key_index(_key_coders, key_columns, seq_column,
797
10
                                                        num_rows, false));
798
6.38k
        } else {
799
6.38k
            RETURN_IF_ERROR(_generate_short_key_index(key_columns, num_rows, short_key_pos));
800
6.38k
        }
801
6.39k
    }
802
803
9.56k
    _num_rows_written += num_rows;
804
9.56k
    _olap_data_convertor->clear_source_content();
805
9.56k
    return Status::OK();
806
9.56k
}
807
808
2.07k
int64_t SegmentWriter::max_row_to_add(size_t row_avg_size_in_bytes) {
809
2.07k
    auto segment_size = estimate_segment_size();
810
2.07k
    if (segment_size >= MAX_SEGMENT_SIZE || _num_rows_written >= _opts.max_rows_per_segment)
811
354
            [[unlikely]] {
812
354
        return 0;
813
354
    }
814
1.71k
    int64_t size_rows = ((int64_t)MAX_SEGMENT_SIZE - (int64_t)segment_size) / row_avg_size_in_bytes;
815
1.71k
    int64_t count_rows = (int64_t)_opts.max_rows_per_segment - _num_rows_written;
816
817
1.71k
    return std::min(size_rows, count_rows);
818
2.07k
}
819
820
std::string SegmentWriter::_full_encode_keys(
821
13.5k
        const std::vector<IOlapColumnDataAccessor*>& key_columns, size_t pos, bool null_first) {
822
13.5k
    assert(_key_index_size.size() == _num_sort_key_columns);
823
13.5k
    assert(key_columns.size() == _num_sort_key_columns &&
824
13.5k
           _key_coders.size() == _num_sort_key_columns);
825
13.5k
    return _full_encode_keys(_key_coders, key_columns, pos, null_first);
826
13.5k
}
827
828
std::string SegmentWriter::_full_encode_keys(
829
        const std::vector<const KeyCoder*>& key_coders,
830
13.5k
        const std::vector<IOlapColumnDataAccessor*>& key_columns, size_t pos, bool null_first) {
831
13.5k
    assert(key_columns.size() == key_coders.size());
832
833
13.5k
    std::string encoded_keys;
834
13.5k
    size_t cid = 0;
835
13.5k
    for (const auto& column : key_columns) {
836
10.2k
        auto field = column->get_data_at(pos);
837
10.2k
        if (UNLIKELY(!field)) {
838
0
            if (null_first) {
839
0
                encoded_keys.push_back(KEY_NULL_FIRST_MARKER);
840
0
            } else {
841
0
                encoded_keys.push_back(KEY_NORMAL_MARKER);
842
0
            }
843
0
            ++cid;
844
0
            continue;
845
0
        }
846
10.2k
        encoded_keys.push_back(KEY_NORMAL_MARKER);
847
10.2k
        DCHECK(key_coders[cid] != nullptr);
848
10.2k
        key_coders[cid]->full_encode_ascending(field, &encoded_keys);
849
10.2k
        ++cid;
850
10.2k
    }
851
13.5k
    return encoded_keys;
852
13.5k
}
853
854
void SegmentWriter::_encode_seq_column(const IOlapColumnDataAccessor* seq_column, size_t pos,
855
0
                                       std::string* encoded_keys) {
856
0
    auto field = seq_column->get_data_at(pos);
857
    // To facilitate the use of the primary key index, encode the seq column
858
    // to the minimum value of the corresponding length when the seq column
859
    // is null
860
0
    if (UNLIKELY(!field)) {
861
0
        encoded_keys->push_back(KEY_NULL_FIRST_MARKER);
862
0
        size_t seq_col_length = _tablet_schema->column(_tablet_schema->sequence_col_idx()).length();
863
0
        encoded_keys->append(seq_col_length, KEY_MINIMAL_MARKER);
864
0
        return;
865
0
    }
866
0
    encoded_keys->push_back(KEY_NORMAL_MARKER);
867
0
    _seq_coder->full_encode_ascending(field, encoded_keys);
868
0
}
869
870
0
void SegmentWriter::_encode_rowid(const uint32_t rowid, std::string* encoded_keys) {
871
0
    encoded_keys->push_back(KEY_NORMAL_MARKER);
872
0
    _rowid_coder->full_encode_ascending(&rowid, encoded_keys);
873
0
}
874
875
std::string SegmentWriter::_encode_keys(const std::vector<IOlapColumnDataAccessor*>& key_columns,
876
13.1k
                                        size_t pos) {
877
13.1k
    assert(key_columns.size() == _num_short_key_columns);
878
879
13.1k
    std::string encoded_keys;
880
13.1k
    size_t cid = 0;
881
13.1k
    for (const auto& column : key_columns) {
882
10.1k
        auto field = column->get_data_at(pos);
883
10.1k
        if (UNLIKELY(!field)) {
884
0
            encoded_keys.push_back(KEY_NULL_FIRST_MARKER);
885
0
            ++cid;
886
0
            continue;
887
0
        }
888
10.1k
        encoded_keys.push_back(KEY_NORMAL_MARKER);
889
10.1k
        _key_coders[cid]->encode_ascending(field, _key_index_size[cid], &encoded_keys);
890
10.1k
        ++cid;
891
10.1k
    }
892
13.1k
    return encoded_keys;
893
13.1k
}
894
895
// TODO(lingbin): Currently this function does not include the size of various indexes,
896
// We should make this more precise.
897
// NOTE: This function will be called when any row of data is added, so we need to
898
// make this function efficient.
899
2.39k
uint64_t SegmentWriter::estimate_segment_size() {
900
    // footer_size(4) + checksum(4) + segment_magic(4)
901
2.39k
    uint64_t size = 12;
902
9.63k
    for (auto& column_writer : _column_writers) {
903
9.63k
        size += column_writer->estimate_buffer_size();
904
9.63k
    }
905
2.39k
    if (_is_mow_with_cluster_key()) {
906
0
        size += _primary_key_index_builder->size() + _short_key_index_builder->size();
907
2.39k
    } else if (_is_mow()) {
908
0
        size += _primary_key_index_builder->size();
909
2.39k
    } else {
910
2.39k
        size += _short_key_index_builder->size();
911
2.39k
    }
912
913
    // update the mem_tracker of segment size
914
2.39k
    _mem_tracker->consume(size - _mem_tracker->consumption());
915
2.39k
    return size;
916
2.39k
}
917
918
7.75k
Status SegmentWriter::finalize_columns_data() {
919
7.75k
    if (_has_key) {
920
5.26k
        _row_count = _num_rows_written;
921
5.26k
    } else {
922
2.49k
        DCHECK(_row_count == _num_rows_written)
923
0
                << "_row_count != _num_rows_written:" << _row_count << " vs. " << _num_rows_written;
924
2.49k
        if (_row_count != _num_rows_written) {
925
0
            std::stringstream ss;
926
0
            ss << "_row_count != _num_rows_written:" << _row_count << " vs. " << _num_rows_written;
927
0
            LOG(WARNING) << ss.str();
928
0
            return Status::InternalError(ss.str());
929
0
        }
930
2.49k
    }
931
7.75k
    _num_rows_written = 0;
932
933
15.9k
    for (auto& column_writer : _column_writers) {
934
15.9k
        RETURN_IF_ERROR(column_writer->finish());
935
15.9k
    }
936
7.75k
    RETURN_IF_ERROR(_write_data());
937
938
7.75k
    return Status::OK();
939
7.75k
}
940
941
7.75k
Status SegmentWriter::finalize_columns_index(uint64_t* index_size) {
942
7.75k
    uint64_t index_start = _file_writer->bytes_appended();
943
7.75k
    RETURN_IF_ERROR(_write_ordinal_index());
944
7.75k
    RETURN_IF_ERROR(_write_zone_map());
945
7.75k
    RETURN_IF_ERROR(_write_inverted_index());
946
7.75k
    RETURN_IF_ERROR(_write_ann_index());
947
7.75k
    RETURN_IF_ERROR(_write_bloom_filter_index());
948
949
7.75k
    *index_size = _file_writer->bytes_appended() - index_start;
950
7.75k
    if (_has_key) {
951
5.26k
        if (_is_mow_with_cluster_key()) {
952
            // 1. sort primary keys
953
0
            std::sort(_primary_keys.begin(), _primary_keys.end());
954
            // 2. write primary keys index
955
0
            std::string last_key;
956
0
            for (const auto& key : _primary_keys) {
957
0
                DCHECK(key.compare(last_key) > 0)
958
0
                        << "found duplicate key or key is not sorted! current key: " << key
959
0
                        << ", last key: " << last_key;
960
0
                RETURN_IF_ERROR(_primary_key_index_builder->add_item(key));
961
0
                last_key = key;
962
0
            }
963
964
0
            RETURN_IF_ERROR(_write_short_key_index());
965
0
            *index_size = _file_writer->bytes_appended() - index_start;
966
0
            RETURN_IF_ERROR(_write_primary_key_index());
967
0
            *index_size += _primary_key_index_builder->disk_size();
968
5.26k
        } else if (_is_mow()) {
969
69
            RETURN_IF_ERROR(_write_primary_key_index());
970
            // IndexedColumnWriter write data pages mixed with segment data, we should use
971
            // the stat from primary key index builder.
972
69
            *index_size += _primary_key_index_builder->disk_size();
973
5.19k
        } else {
974
5.19k
            RETURN_IF_ERROR(_write_short_key_index());
975
5.19k
            *index_size = _file_writer->bytes_appended() - index_start;
976
5.19k
        }
977
5.26k
    }
978
    // reset all column writers and data_conveter
979
7.75k
    clear();
980
981
7.75k
    return Status::OK();
982
7.75k
}
983
984
5.26k
Status SegmentWriter::finalize_footer(uint64_t* segment_file_size) {
985
5.26k
    RETURN_IF_ERROR(_write_footer());
986
    // finish
987
5.26k
    RETURN_IF_ERROR(_file_writer->close(true));
988
5.26k
    *segment_file_size = _file_writer->bytes_appended();
989
5.26k
    if (*segment_file_size == 0) {
990
0
        return Status::Corruption("Bad segment, file size = 0");
991
0
    }
992
5.26k
    return Status::OK();
993
5.26k
}
994
995
1.25k
Status SegmentWriter::finalize(uint64_t* segment_file_size, uint64_t* index_size) {
996
1.25k
    MonotonicStopWatch timer;
997
1.25k
    timer.start();
998
    // check disk capacity
999
1.25k
    if (_data_dir != nullptr && _data_dir->reach_capacity_limit((int64_t)estimate_segment_size())) {
1000
0
        return Status::Error<DISK_REACH_CAPACITY_LIMIT>("disk {} exceed capacity limit, path: {}",
1001
0
                                                        _data_dir->path_hash(), _data_dir->path());
1002
0
    }
1003
    // write data
1004
1.25k
    RETURN_IF_ERROR(finalize_columns_data());
1005
    // Get the index start before finalize_footer since this function would write new data.
1006
1.25k
    uint64_t index_start = _file_writer->bytes_appended();
1007
    // write index
1008
1.25k
    RETURN_IF_ERROR(finalize_columns_index(index_size));
1009
    // write footer
1010
1.25k
    RETURN_IF_ERROR(finalize_footer(segment_file_size));
1011
1012
1.25k
    if (timer.elapsed_time() > 5000000000l) {
1013
0
        LOG(INFO) << "segment flush consumes a lot time_ns " << timer.elapsed_time()
1014
0
                  << ", segmemt_size " << *segment_file_size;
1015
0
    }
1016
    // When the cache type is not ttl(expiration time == 0), the data should be split into normal cache queue
1017
    // and index cache queue
1018
1.25k
    if (auto* cache_builder = _file_writer->cache_builder(); cache_builder != nullptr &&
1019
1.25k
                                                             cache_builder->_expiration_time == 0 &&
1020
1.25k
                                                             config::is_cloud_mode()) {
1021
0
        auto size = *index_size + *segment_file_size;
1022
0
        auto holder = cache_builder->allocate_cache_holder(index_start, size, _tablet->tablet_id());
1023
0
        for (auto& segment : holder->file_blocks) {
1024
0
            static_cast<void>(segment->change_cache_type(io::FileCacheType::INDEX));
1025
0
        }
1026
0
    }
1027
1.25k
    return Status::OK();
1028
1.25k
}
1029
1030
7.78k
void SegmentWriter::clear() {
1031
15.9k
    for (auto& column_writer : _column_writers) {
1032
15.9k
        column_writer.reset();
1033
15.9k
    }
1034
7.78k
    _column_writers.clear();
1035
7.78k
    _column_ids.clear();
1036
7.78k
    _olap_data_convertor.reset();
1037
7.78k
}
1038
1039
// write column data to file one by one
1040
7.75k
Status SegmentWriter::_write_data() {
1041
15.9k
    for (auto& column_writer : _column_writers) {
1042
15.9k
        RETURN_IF_ERROR(column_writer->write_data());
1043
1044
15.9k
        auto* column_meta = column_writer->get_column_meta();
1045
15.9k
        DCHECK(column_meta != nullptr);
1046
15.9k
        column_meta->set_compressed_data_bytes(
1047
15.9k
                (column_meta->has_compressed_data_bytes() ? column_meta->compressed_data_bytes()
1048
15.9k
                                                          : 0) +
1049
15.9k
                column_writer->get_total_compressed_data_pages_bytes());
1050
15.9k
        column_meta->set_uncompressed_data_bytes(
1051
15.9k
                (column_meta->has_uncompressed_data_bytes() ? column_meta->uncompressed_data_bytes()
1052
15.9k
                                                            : 0) +
1053
15.9k
                column_writer->get_total_uncompressed_data_pages_bytes());
1054
15.9k
        column_meta->set_raw_data_bytes(
1055
15.9k
                (column_meta->has_raw_data_bytes() ? column_meta->raw_data_bytes() : 0) +
1056
15.9k
                column_writer->get_raw_data_bytes());
1057
15.9k
    }
1058
7.75k
    return Status::OK();
1059
7.75k
}
1060
1061
// write ordinal index after data has been written
1062
7.75k
Status SegmentWriter::_write_ordinal_index() {
1063
15.9k
    for (auto& column_writer : _column_writers) {
1064
15.9k
        RETURN_IF_ERROR(column_writer->write_ordinal_index());
1065
15.9k
    }
1066
7.75k
    return Status::OK();
1067
7.75k
}
1068
1069
7.75k
Status SegmentWriter::_write_zone_map() {
1070
15.9k
    for (auto& column_writer : _column_writers) {
1071
15.9k
        RETURN_IF_ERROR(column_writer->write_zone_map());
1072
15.9k
    }
1073
7.75k
    return Status::OK();
1074
7.75k
}
1075
1076
7.75k
Status SegmentWriter::_write_inverted_index() {
1077
15.9k
    for (auto& column_writer : _column_writers) {
1078
15.9k
        RETURN_IF_ERROR(column_writer->write_inverted_index());
1079
15.9k
    }
1080
7.75k
    return Status::OK();
1081
7.75k
}
1082
1083
7.75k
Status SegmentWriter::_write_ann_index() {
1084
15.9k
    for (auto& column_writer : _column_writers) {
1085
15.9k
        RETURN_IF_ERROR(column_writer->write_ann_index());
1086
15.9k
    }
1087
7.75k
    return Status::OK();
1088
7.75k
}
1089
1090
7.75k
Status SegmentWriter::_write_bloom_filter_index() {
1091
15.9k
    for (auto& column_writer : _column_writers) {
1092
15.9k
        RETURN_IF_ERROR(column_writer->write_bloom_filter_index());
1093
15.9k
    }
1094
7.75k
    return Status::OK();
1095
7.75k
}
1096
1097
5.19k
Status SegmentWriter::_write_short_key_index() {
1098
5.19k
    std::vector<Slice> body;
1099
5.19k
    PageFooterPB footer;
1100
5.19k
    RETURN_IF_ERROR(_short_key_index_builder->finalize(_row_count, &body, &footer));
1101
5.19k
    PagePointer pp;
1102
    // short key index page is not compressed right now
1103
5.19k
    RETURN_IF_ERROR(PageIO::write_page(_file_writer, body, footer, &pp));
1104
5.19k
    pp.to_proto(_footer.mutable_short_key_index_page());
1105
5.19k
    return Status::OK();
1106
5.19k
}
1107
1108
69
Status SegmentWriter::_write_primary_key_index() {
1109
69
    CHECK_EQ(_primary_key_index_builder->num_rows(), _row_count);
1110
69
    return _primary_key_index_builder->finalize(_footer.mutable_primary_key_index_meta());
1111
69
}
1112
1113
5.26k
Status SegmentWriter::_write_footer() {
1114
5.26k
    _footer.set_num_rows(_row_count);
1115
    // Decide whether to externalize ColumnMetaPB by tablet default, and stamp footer version
1116
5.26k
    if (_tablet_schema->is_external_segment_column_meta_used()) {
1117
71
        _footer.set_version(SEGMENT_FOOTER_VERSION_V3_EXT_COL_META);
1118
71
        VLOG_DEBUG << "use external column meta";
1119
        // External ColumnMetaPB writing (optional)
1120
71
        RETURN_IF_ERROR(ExternalColMetaUtil::write_external_column_meta(
1121
71
                _file_writer, &_footer, _opts.compression_type,
1122
71
                [this](const std::vector<Slice>& slices) { return _write_raw_data(slices); }));
1123
71
    }
1124
1125
    // Footer := SegmentFooterPB, FooterPBSize(4), FooterPBChecksum(4), MagicNumber(4)
1126
5.26k
    std::string footer_buf;
1127
5.26k
    VLOG_DEBUG << "footer " << _footer.DebugString();
1128
5.26k
    if (!_footer.SerializeToString(&footer_buf)) {
1129
0
        return Status::InternalError("failed to serialize segment footer");
1130
0
    }
1131
1132
5.26k
    faststring fixed_buf;
1133
    // footer's size
1134
5.26k
    put_fixed32_le(&fixed_buf, cast_set<uint32_t>(footer_buf.size()));
1135
    // footer's checksum
1136
5.26k
    uint32_t checksum = crc32c::Crc32c(footer_buf.data(), footer_buf.size());
1137
5.26k
    put_fixed32_le(&fixed_buf, checksum);
1138
    // Append magic number. we don't write magic number in the header because
1139
    // that will need an extra seek when reading
1140
5.26k
    fixed_buf.append(k_segment_magic, k_segment_magic_length);
1141
1142
5.26k
    std::vector<Slice> slices {footer_buf, fixed_buf};
1143
5.26k
    return _write_raw_data(slices);
1144
5.26k
}
1145
1146
6.99k
Status SegmentWriter::_write_raw_data(const std::vector<Slice>& slices) {
1147
6.99k
    RETURN_IF_ERROR(_file_writer->appendv(&slices[0], slices.size()));
1148
6.99k
    return Status::OK();
1149
6.99k
}
1150
1151
5.25k
Slice SegmentWriter::min_encoded_key() {
1152
5.25k
    return (_primary_key_index_builder == nullptr) ? Slice(_min_key.data(), _min_key.size())
1153
5.25k
                                                   : _primary_key_index_builder->min_key();
1154
5.25k
}
1155
5.25k
Slice SegmentWriter::max_encoded_key() {
1156
5.25k
    return (_primary_key_index_builder == nullptr) ? Slice(_max_key.data(), _max_key.size())
1157
5.25k
                                                   : _primary_key_index_builder->max_key();
1158
5.25k
}
1159
1160
203
void SegmentWriter::set_min_max_key(const Slice& key) {
1161
203
    if (UNLIKELY(_is_first_row)) {
1162
5
        _min_key.append(key.get_data(), key.get_size());
1163
5
        _is_first_row = false;
1164
5
    }
1165
203
    if (key.compare(_max_key) > 0) {
1166
203
        _max_key.clear();
1167
203
        _max_key.append(key.get_data(), key.get_size());
1168
203
    }
1169
203
}
1170
1171
6.38k
void SegmentWriter::set_min_key(const Slice& key) {
1172
6.38k
    if (UNLIKELY(_is_first_row)) {
1173
5.18k
        _min_key.append(key.get_data(), key.get_size());
1174
5.18k
        _is_first_row = false;
1175
5.18k
    }
1176
6.38k
}
1177
1178
6.38k
void SegmentWriter::set_max_key(const Slice& key) {
1179
6.38k
    _max_key.clear();
1180
6.38k
    _max_key.append(key.get_data(), key.get_size());
1181
6.38k
}
1182
1183
0
void SegmentWriter::set_mow_context(std::shared_ptr<MowContext> mow_context) {
1184
0
    _mow_context = mow_context;
1185
0
}
1186
1187
Status SegmentWriter::_generate_primary_key_index(
1188
        const std::vector<const KeyCoder*>& primary_key_coders,
1189
        const std::vector<IOlapColumnDataAccessor*>& primary_key_columns,
1190
10
        IOlapColumnDataAccessor* seq_column, size_t num_rows, bool need_sort) {
1191
10
    if (!need_sort) { // mow table without cluster key
1192
10
        std::string last_key;
1193
810
        for (size_t pos = 0; pos < num_rows; pos++) {
1194
            // use _key_coders
1195
800
            std::string key = _full_encode_keys(primary_key_columns, pos);
1196
800
            _maybe_invalid_row_cache(key);
1197
800
            if (_tablet_schema->has_sequence_col()) {
1198
0
                _encode_seq_column(seq_column, pos, &key);
1199
0
            }
1200
800
            DCHECK(key.compare(last_key) > 0)
1201
0
                    << "found duplicate key or key is not sorted! current key: " << key
1202
0
                    << ", last key: " << last_key;
1203
800
            RETURN_IF_ERROR(_primary_key_index_builder->add_item(key));
1204
800
            last_key = std::move(key);
1205
800
        }
1206
10
    } else { // mow table with cluster key
1207
        // generate primary keys in memory
1208
0
        for (uint32_t pos = 0; pos < num_rows; pos++) {
1209
0
            std::string key = _full_encode_keys(primary_key_coders, primary_key_columns, pos);
1210
0
            _maybe_invalid_row_cache(key);
1211
0
            if (_tablet_schema->has_sequence_col()) {
1212
0
                _encode_seq_column(seq_column, pos, &key);
1213
0
            }
1214
0
            _encode_rowid(pos + _num_rows_written, &key);
1215
0
            _primary_keys_size += key.size();
1216
0
            _primary_keys.emplace_back(std::move(key));
1217
0
        }
1218
0
    }
1219
10
    return Status::OK();
1220
10
}
1221
1222
Status SegmentWriter::_generate_short_key_index(std::vector<IOlapColumnDataAccessor*>& key_columns,
1223
                                                size_t num_rows,
1224
6.38k
                                                const std::vector<size_t>& short_key_pos) {
1225
    // use _key_coders
1226
6.38k
    set_min_key(_full_encode_keys(key_columns, 0));
1227
6.38k
    set_max_key(_full_encode_keys(key_columns, num_rows - 1));
1228
6.38k
    DCHECK(Slice(_max_key.data(), _max_key.size())
1229
0
                   .compare(Slice(_min_key.data(), _min_key.size())) >= 0)
1230
0
            << "key is not sorted! min key: " << _min_key << ", max key: " << _max_key;
1231
1232
6.38k
    key_columns.resize(_num_short_key_columns);
1233
6.38k
    std::string last_key;
1234
13.1k
    for (const auto pos : short_key_pos) {
1235
13.1k
        std::string key = _encode_keys(key_columns, pos);
1236
13.1k
        DCHECK(key.compare(last_key) >= 0)
1237
0
                << "key is not sorted! current key: " << key << ", last key: " << last_key;
1238
13.1k
        RETURN_IF_ERROR(_short_key_index_builder->add_item(key));
1239
13.1k
        last_key = std::move(key);
1240
13.1k
    }
1241
6.38k
    return Status::OK();
1242
6.38k
}
1243
1244
302k
inline bool SegmentWriter::_is_mow() {
1245
302k
    return _tablet_schema->keys_type() == UNIQUE_KEYS && _opts.enable_unique_key_merge_on_write;
1246
302k
}
1247
1248
148k
inline bool SegmentWriter::_is_mow_with_cluster_key() {
1249
148k
    return _is_mow() && !_tablet_schema->cluster_key_uids().empty();
1250
148k
}
1251
1252
#include "common/compile_check_end.h"
1253
1254
} // namespace segment_v2
1255
} // namespace doris