/root/doris/be/src/olap/schema_change.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include "olap/schema_change.h" |
19 | | |
20 | | #include <gen_cpp/olap_file.pb.h> |
21 | | #include <glog/logging.h> |
22 | | #include <thrift/protocol/TDebugProtocol.h> |
23 | | |
24 | | #include <algorithm> |
25 | | #include <exception> |
26 | | #include <map> |
27 | | #include <memory> |
28 | | #include <mutex> |
29 | | #include <roaring/roaring.hh> |
30 | | #include <tuple> |
31 | | #include <utility> |
32 | | |
33 | | #include "agent/be_exec_version_manager.h" |
34 | | #include "cloud/cloud_schema_change_job.h" |
35 | | #include "cloud/config.h" |
36 | | #include "common/consts.h" |
37 | | #include "common/logging.h" |
38 | | #include "common/signal_handler.h" |
39 | | #include "common/status.h" |
40 | | #include "exec/schema_scanner/schema_metadata_name_ids_scanner.h" |
41 | | #include "gutil/strings/numbers.h" |
42 | | #include "io/fs/file_system.h" |
43 | | #include "io/io_common.h" |
44 | | #include "olap/base_tablet.h" |
45 | | #include "olap/data_dir.h" |
46 | | #include "olap/delete_handler.h" |
47 | | #include "olap/field.h" |
48 | | #include "olap/iterators.h" |
49 | | #include "olap/merger.h" |
50 | | #include "olap/olap_common.h" |
51 | | #include "olap/olap_define.h" |
52 | | #include "olap/rowset/beta_rowset.h" |
53 | | #include "olap/rowset/pending_rowset_helper.h" |
54 | | #include "olap/rowset/rowset_meta.h" |
55 | | #include "olap/rowset/rowset_reader_context.h" |
56 | | #include "olap/rowset/rowset_writer_context.h" |
57 | | #include "olap/rowset/segment_v2/column_reader.h" |
58 | | #include "olap/rowset/segment_v2/inverted_index_desc.h" |
59 | | #include "olap/rowset/segment_v2/inverted_index_writer.h" |
60 | | #include "olap/rowset/segment_v2/segment.h" |
61 | | #include "olap/schema.h" |
62 | | #include "olap/segment_loader.h" |
63 | | #include "olap/storage_engine.h" |
64 | | #include "olap/tablet.h" |
65 | | #include "olap/tablet_fwd.h" |
66 | | #include "olap/tablet_manager.h" |
67 | | #include "olap/tablet_meta.h" |
68 | | #include "olap/tablet_schema.h" |
69 | | #include "olap/types.h" |
70 | | #include "olap/utils.h" |
71 | | #include "olap/wrapper_field.h" |
72 | | #include "runtime/exec_env.h" |
73 | | #include "runtime/memory/mem_tracker.h" |
74 | | #include "runtime/runtime_state.h" |
75 | | #include "util/debug_points.h" |
76 | | #include "util/defer_op.h" |
77 | | #include "util/trace.h" |
78 | | #include "vec/aggregate_functions/aggregate_function.h" |
79 | | #include "vec/aggregate_functions/aggregate_function_reader.h" |
80 | | #include "vec/columns/column.h" |
81 | | #include "vec/columns/column_nullable.h" |
82 | | #include "vec/common/assert_cast.h" |
83 | | #include "vec/common/schema_util.h" |
84 | | #include "vec/core/block.h" |
85 | | #include "vec/core/column_with_type_and_name.h" |
86 | | #include "vec/exprs/vexpr.h" |
87 | | #include "vec/exprs/vexpr_context.h" |
88 | | #include "vec/olap/olap_data_convertor.h" |
89 | | |
90 | | namespace doris { |
91 | | class CollectionValue; |
92 | | |
93 | | using namespace ErrorCode; |
94 | | |
95 | | constexpr int ALTER_TABLE_BATCH_SIZE = 4064; |
96 | | |
97 | | class MultiBlockMerger { |
98 | | public: |
99 | 0 | MultiBlockMerger(BaseTabletSPtr tablet) : _tablet(tablet), _cmp(*tablet) {} |
100 | | |
101 | | Status merge(const std::vector<std::unique_ptr<vectorized::Block>>& blocks, |
102 | 0 | RowsetWriter* rowset_writer, uint64_t* merged_rows) { |
103 | 0 | int rows = 0; |
104 | 0 | for (const auto& block : blocks) { |
105 | 0 | rows += block->rows(); |
106 | 0 | } |
107 | 0 | if (!rows) { |
108 | 0 | return Status::OK(); |
109 | 0 | } |
110 | | |
111 | 0 | std::vector<RowRef> row_refs; |
112 | 0 | row_refs.reserve(rows); |
113 | 0 | for (const auto& block : blocks) { |
114 | 0 | for (uint16_t i = 0; i < block->rows(); i++) { |
115 | 0 | row_refs.emplace_back(block.get(), i); |
116 | 0 | } |
117 | 0 | } |
118 | | // TODO: try to use pdqsort to replace std::sort |
119 | | // The block version is incremental. |
120 | 0 | std::stable_sort(row_refs.begin(), row_refs.end(), _cmp); |
121 | |
122 | 0 | auto finalized_block = _tablet->tablet_schema()->create_block(); |
123 | 0 | int columns = finalized_block.columns(); |
124 | 0 | *merged_rows += rows; |
125 | |
126 | 0 | if (_tablet->keys_type() == KeysType::AGG_KEYS) { |
127 | 0 | auto tablet_schema = _tablet->tablet_schema(); |
128 | 0 | int key_number = _tablet->num_key_columns(); |
129 | |
130 | 0 | std::vector<vectorized::AggregateFunctionPtr> agg_functions; |
131 | 0 | std::vector<vectorized::AggregateDataPtr> agg_places; |
132 | |
133 | 0 | for (int i = key_number; i < columns; i++) { |
134 | 0 | try { |
135 | 0 | vectorized::AggregateFunctionPtr function = |
136 | 0 | tablet_schema->column(i).get_aggregate_function( |
137 | 0 | vectorized::AGG_LOAD_SUFFIX, |
138 | 0 | tablet_schema->column(i).get_be_exec_version()); |
139 | 0 | if (!function) { |
140 | 0 | return Status::InternalError( |
141 | 0 | "could not find aggregate function on column {}, aggregation={}", |
142 | 0 | tablet_schema->column(i).name(), |
143 | 0 | tablet_schema->column(i).aggregation()); |
144 | 0 | } |
145 | 0 | agg_functions.push_back(function); |
146 | | // create aggregate data |
147 | 0 | auto* place = new char[function->size_of_data()]; |
148 | 0 | function->create(place); |
149 | 0 | agg_places.push_back(place); |
150 | 0 | } catch (...) { |
151 | 0 | for (int j = 0; j < i - key_number; ++j) { |
152 | 0 | agg_functions[j]->destroy(agg_places[j]); |
153 | 0 | delete[] agg_places[j]; |
154 | 0 | } |
155 | 0 | throw; |
156 | 0 | } |
157 | 0 | } |
158 | | |
159 | 0 | DEFER({ |
160 | 0 | for (int i = 0; i < columns - key_number; i++) { |
161 | 0 | agg_functions[i]->destroy(agg_places[i]); |
162 | 0 | delete[] agg_places[i]; |
163 | 0 | } |
164 | 0 | }); |
165 | |
166 | 0 | for (int i = 0; i < rows; i++) { |
167 | 0 | auto row_ref = row_refs[i]; |
168 | 0 | for (int j = key_number; j < columns; j++) { |
169 | 0 | const auto* column_ptr = row_ref.get_column(j).get(); |
170 | 0 | agg_functions[j - key_number]->add( |
171 | 0 | agg_places[j - key_number], |
172 | 0 | const_cast<const vectorized::IColumn**>(&column_ptr), row_ref.position, |
173 | 0 | &_arena); |
174 | 0 | } |
175 | |
176 | 0 | if (i == rows - 1 || _cmp.compare(row_refs[i], row_refs[i + 1])) { |
177 | 0 | for (int j = 0; j < key_number; j++) { |
178 | 0 | finalized_block.get_by_position(j).column->assume_mutable()->insert_from( |
179 | 0 | *row_ref.get_column(j), row_ref.position); |
180 | 0 | } |
181 | |
182 | 0 | for (int j = key_number; j < columns; j++) { |
183 | 0 | agg_functions[j - key_number]->insert_result_into( |
184 | 0 | agg_places[j - key_number], |
185 | 0 | finalized_block.get_by_position(j).column->assume_mutable_ref()); |
186 | 0 | agg_functions[j - key_number]->reset(agg_places[j - key_number]); |
187 | 0 | } |
188 | |
189 | 0 | if (i == rows - 1 || finalized_block.rows() == ALTER_TABLE_BATCH_SIZE) { |
190 | 0 | *merged_rows -= finalized_block.rows(); |
191 | 0 | RETURN_IF_ERROR(rowset_writer->add_block(&finalized_block)); |
192 | 0 | finalized_block.clear_column_data(); |
193 | 0 | } |
194 | 0 | } |
195 | 0 | } |
196 | 0 | } else { |
197 | 0 | std::vector<RowRef> pushed_row_refs; |
198 | 0 | if (_tablet->keys_type() == KeysType::DUP_KEYS) { |
199 | 0 | std::swap(pushed_row_refs, row_refs); |
200 | 0 | } else if (_tablet->keys_type() == KeysType::UNIQUE_KEYS) { |
201 | 0 | for (int i = 0; i < rows; i++) { |
202 | 0 | if (i == rows - 1 || _cmp.compare(row_refs[i], row_refs[i + 1])) { |
203 | 0 | pushed_row_refs.push_back(row_refs[i]); |
204 | 0 | } |
205 | 0 | } |
206 | 0 | if (!_tablet->tablet_schema()->cluster_key_uids().empty()) { |
207 | 0 | std::vector<uint32_t> ids; |
208 | 0 | for (const auto& cid : _tablet->tablet_schema()->cluster_key_uids()) { |
209 | 0 | auto index = _tablet->tablet_schema()->field_index(cid); |
210 | 0 | if (index == -1) { |
211 | 0 | return Status::InternalError( |
212 | 0 | "could not find cluster key column with unique_id=" + |
213 | 0 | std::to_string(cid) + " in tablet schema"); |
214 | 0 | } |
215 | 0 | ids.push_back(index); |
216 | 0 | } |
217 | | // sort by cluster key |
218 | 0 | std::stable_sort(pushed_row_refs.begin(), pushed_row_refs.end(), |
219 | 0 | ClusterKeyRowRefComparator(ids)); |
220 | 0 | } |
221 | 0 | } |
222 | | |
223 | | // update real inserted row number |
224 | 0 | rows = pushed_row_refs.size(); |
225 | 0 | *merged_rows -= rows; |
226 | |
227 | 0 | for (int i = 0; i < rows; i += ALTER_TABLE_BATCH_SIZE) { |
228 | 0 | int limit = std::min(ALTER_TABLE_BATCH_SIZE, rows - i); |
229 | |
230 | 0 | for (int idx = 0; idx < columns; idx++) { |
231 | 0 | auto column = finalized_block.get_by_position(idx).column->assume_mutable(); |
232 | |
233 | 0 | for (int j = 0; j < limit; j++) { |
234 | 0 | auto row_ref = pushed_row_refs[i + j]; |
235 | 0 | column->insert_from(*row_ref.get_column(idx), row_ref.position); |
236 | 0 | } |
237 | 0 | } |
238 | 0 | RETURN_IF_ERROR(rowset_writer->add_block(&finalized_block)); |
239 | 0 | finalized_block.clear_column_data(); |
240 | 0 | } |
241 | 0 | } |
242 | | |
243 | 0 | RETURN_IF_ERROR(rowset_writer->flush()); |
244 | 0 | return Status::OK(); |
245 | 0 | } |
246 | | |
247 | | private: |
248 | | struct RowRef { |
249 | | RowRef(vectorized::Block* block_, uint16_t position_) |
250 | 0 | : block(block_), position(position_) {} |
251 | 0 | vectorized::ColumnPtr get_column(int index) const { |
252 | 0 | return block->get_by_position(index).column; |
253 | 0 | } |
254 | | const vectorized::Block* block; |
255 | | uint16_t position; |
256 | | }; |
257 | | |
258 | | struct RowRefComparator { |
259 | 0 | RowRefComparator(const BaseTablet& tablet) : _num_columns(tablet.num_key_columns()) {} |
260 | | |
261 | 0 | int compare(const RowRef& lhs, const RowRef& rhs) const { |
262 | | // Note: the sequence column is not compared for a merge-on-write table, because we |
263 | | // read from rowsets with the delete bitmap applied, so there should be no duplicated keys |
264 | 0 | return lhs.block->compare_at(lhs.position, rhs.position, _num_columns, *rhs.block, -1); |
265 | 0 | } |
266 | | |
267 | 0 | bool operator()(const RowRef& lhs, const RowRef& rhs) const { |
268 | 0 | return compare(lhs, rhs) < 0; |
269 | 0 | } |
270 | | |
271 | | const size_t _num_columns; |
272 | | }; |
273 | | |
274 | | struct ClusterKeyRowRefComparator { |
275 | 0 | ClusterKeyRowRefComparator(std::vector<uint32_t> columns) : _columns(columns) {} |
276 | | |
277 | 0 | int compare(const RowRef& lhs, const RowRef& rhs) const { |
278 | 0 | return lhs.block->compare_at(lhs.position, rhs.position, &_columns, *rhs.block, -1); |
279 | 0 | } |
280 | | |
281 | 0 | bool operator()(const RowRef& lhs, const RowRef& rhs) const { |
282 | 0 | return compare(lhs, rhs) < 0; |
283 | 0 | } |
284 | | |
285 | | const std::vector<uint32_t> _columns; |
286 | | }; |
287 | | |
288 | | BaseTabletSPtr _tablet; |
289 | | RowRefComparator _cmp; |
290 | | vectorized::Arena _arena; |
291 | | }; |
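The UNIQUE_KEYS branch above keeps only the last row of each equal-key run after a stable sort, which is how the newest load wins. A minimal, self-contained sketch of that pattern on a plain struct (names here are illustrative, not Doris types):

#include <algorithm>
#include <cstddef>
#include <string>
#include <vector>

struct Row {
    std::string key;  // stands in for the key columns
    int value;        // stands in for the value columns
};

// Rows must be appended in load order; stable_sort keeps that order inside each
// key run, so emitting only the last row of a run keeps the newest version.
std::vector<Row> deduplicate_unique_keys(std::vector<Row> rows) {
    std::stable_sort(rows.begin(), rows.end(),
                     [](const Row& l, const Row& r) { return l.key < r.key; });
    std::vector<Row> pushed;
    for (std::size_t i = 0; i < rows.size(); ++i) {
        if (i + 1 == rows.size() || rows[i].key != rows[i + 1].key) {
            pushed.push_back(rows[i]);
        }
    }
    return pushed;
}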
292 | | |
293 | | BlockChanger::BlockChanger(TabletSchemaSPtr tablet_schema, DescriptorTbl desc_tbl) |
294 | 0 | : _desc_tbl(std::move(desc_tbl)) { |
295 | 0 | _schema_mapping.resize(tablet_schema->num_columns()); |
296 | 0 | } |
297 | | |
298 | 0 | BlockChanger::~BlockChanger() { |
299 | 0 | for (auto it = _schema_mapping.begin(); it != _schema_mapping.end(); ++it) { |
300 | 0 | SAFE_DELETE(it->default_value); |
301 | 0 | } |
302 | 0 | _schema_mapping.clear(); |
303 | 0 | } |
304 | | |
305 | 0 | ColumnMapping* BlockChanger::get_mutable_column_mapping(size_t column_index) { |
306 | 0 | if (column_index >= _schema_mapping.size()) { |
307 | 0 | return nullptr; |
308 | 0 | } |
309 | | |
310 | 0 | return &(_schema_mapping[column_index]); |
311 | 0 | } |
312 | | |
313 | | Status BlockChanger::change_block(vectorized::Block* ref_block, |
314 | 0 | vectorized::Block* new_block) const { |
315 | 0 | std::unique_ptr<RuntimeState> state = RuntimeState::create_unique(); |
316 | 0 | state->set_desc_tbl(&_desc_tbl); |
317 | 0 | state->set_be_exec_version(_fe_compatible_version); |
318 | 0 | RowDescriptor row_desc = |
319 | 0 | RowDescriptor(_desc_tbl.get_tuple_descriptor(_desc_tbl.get_row_tuples()[0]), false); |
320 | |
321 | 0 | if (_where_expr != nullptr) { |
322 | 0 | vectorized::VExprContextSPtr ctx = nullptr; |
323 | 0 | RETURN_IF_ERROR(vectorized::VExpr::create_expr_tree(*_where_expr, ctx)); |
324 | 0 | RETURN_IF_ERROR(ctx->prepare(state.get(), row_desc)); |
325 | 0 | RETURN_IF_ERROR(ctx->open(state.get())); |
326 | | |
327 | 0 | RETURN_IF_ERROR( |
328 | 0 | vectorized::VExprContext::filter_block(ctx.get(), ref_block, ref_block->columns())); |
329 | 0 | } |
330 | | |
331 | 0 | const int row_num = ref_block->rows(); |
332 | 0 | const int new_schema_cols_num = new_block->columns(); |
333 | | |
334 | | // will be used for swapping ref_block[entry.first] and new_block[entry.second] |
335 | 0 | std::list<std::pair<int, int>> swap_idx_list; |
336 | 0 | for (int idx = 0; idx < new_schema_cols_num; idx++) { |
337 | 0 | auto expr = _schema_mapping[idx].expr; |
338 | 0 | if (expr != nullptr) { |
339 | 0 | vectorized::VExprContextSPtr ctx; |
340 | 0 | RETURN_IF_ERROR(vectorized::VExpr::create_expr_tree(*expr, ctx)); |
341 | 0 | RETURN_IF_ERROR(ctx->prepare(state.get(), row_desc)); |
342 | 0 | RETURN_IF_ERROR(ctx->open(state.get())); |
343 | | |
344 | 0 | int result_tmp_column_idx = -1; |
345 | 0 | RETURN_IF_ERROR(ctx->execute(ref_block, &result_tmp_column_idx)); |
346 | 0 | auto& result_tmp_column_def = ref_block->get_by_position(result_tmp_column_idx); |
347 | 0 | if (!result_tmp_column_def.column) { |
348 | 0 | return Status::Error<ErrorCode::INTERNAL_ERROR>( |
349 | 0 | "result column={} is nullptr, input expr={}", result_tmp_column_def.name, |
350 | 0 | apache::thrift::ThriftDebugString(*expr)); |
351 | 0 | } |
352 | 0 | ref_block->replace_by_position_if_const(result_tmp_column_idx); |
353 | |
354 | 0 | if (result_tmp_column_def.column->size() != row_num) { |
355 | 0 | return Status::Error<ErrorCode::INTERNAL_ERROR>( |
356 | 0 | "result size invalid, expect={}, real={}; input expr={}, block={}", row_num, |
357 | 0 | result_tmp_column_def.column->size(), |
358 | 0 | apache::thrift::ThriftDebugString(*expr), ref_block->dump_structure()); |
359 | 0 | } |
360 | | |
361 | 0 | if (_type == SCHEMA_CHANGE) { |
362 | | // dangerous casts (expected to be rejected by the upstream caller) may produce null data and cause data loss in schema change |
363 | | // for rollup this check is unnecessary; ref columns are not set in that case, and the change works on exprs |
364 | | |
365 | | // column_idx in base schema |
366 | 0 | int32_t ref_column_idx = _schema_mapping[idx].ref_column_idx; |
367 | 0 | DCHECK_GE(ref_column_idx, 0); |
368 | 0 | auto& ref_column_def = ref_block->get_by_position(ref_column_idx); |
369 | 0 | RETURN_IF_ERROR( |
370 | 0 | _check_cast_valid(ref_column_def.column, result_tmp_column_def.column)); |
371 | 0 | } |
372 | 0 | swap_idx_list.emplace_back(result_tmp_column_idx, idx); |
373 | 0 | } else if (_schema_mapping[idx].ref_column_idx < 0) { |
374 | | // new column, write default value |
375 | 0 | auto* value = _schema_mapping[idx].default_value; |
376 | 0 | auto column = new_block->get_by_position(idx).column->assume_mutable(); |
377 | 0 | if (value->is_null()) { |
378 | 0 | DCHECK(column->is_nullable()); |
379 | 0 | column->insert_many_defaults(row_num); |
380 | 0 | } else { |
381 | 0 | auto type_info = get_type_info(_schema_mapping[idx].new_column); |
382 | 0 | DefaultValueColumnIterator::insert_default_data(type_info.get(), value->size(), |
383 | 0 | value->ptr(), column, row_num); |
384 | 0 | } |
385 | 0 | } else { |
386 | | // same type, just swap column |
387 | 0 | swap_idx_list.emplace_back(_schema_mapping[idx].ref_column_idx, idx); |
388 | 0 | } |
389 | 0 | } |
390 | | |
391 | 0 | for (auto it : swap_idx_list) { |
392 | 0 | auto& ref_col = ref_block->get_by_position(it.first).column; |
393 | 0 | auto& new_col = new_block->get_by_position(it.second).column; |
394 | |
395 | 0 | bool ref_col_nullable = ref_col->is_nullable(); |
396 | 0 | bool new_col_nullable = new_col->is_nullable(); |
397 | |
398 | 0 | if (ref_col_nullable != new_col_nullable) { |
399 | | // not nullable to nullable |
400 | 0 | if (new_col_nullable) { |
401 | 0 | auto* new_nullable_col = |
402 | 0 | assert_cast<vectorized::ColumnNullable*>(new_col->assume_mutable().get()); |
403 | |
404 | 0 | new_nullable_col->change_nested_column(ref_col); |
405 | 0 | new_nullable_col->get_null_map_data().resize_fill(ref_col->size()); |
406 | 0 | } else { |
407 | | // nullable to not nullable: |
408 | | // suppose column `c_phone` is originally varchar(16) NOT NULL, |
409 | | // then do schema change `alter table test modify column c_phone int not null`, |
410 | | // the cast expr of schema change is `CastExpr(CAST String to Nullable(Int32))`, |
411 | | // so need to handle nullable to not nullable here |
412 | 0 | auto* ref_nullable_col = |
413 | 0 | assert_cast<vectorized::ColumnNullable*>(ref_col->assume_mutable().get()); |
414 | |
415 | 0 | new_col = ref_nullable_col->get_nested_column_ptr(); |
416 | 0 | } |
417 | 0 | } else { |
418 | 0 | new_block->get_by_position(it.second).column = |
419 | 0 | ref_block->get_by_position(it.first).column; |
420 | 0 | } |
421 | 0 | } |
422 | 0 | return Status::OK(); |
423 | 0 | } |
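The swap loop above reconciles nullability between the source and target columns: a not-nullable source is wrapped with an all-zero null map, and a nullable source feeding a not-nullable target is unwrapped to its nested column. A simplified standalone sketch of those two conversions (NullableColumn is an illustrative stand-in, not ColumnNullable):

#include <cstdint>
#include <utility>
#include <vector>

// Illustrative stand-in for a nullable column: values plus a null map (1 = null).
struct NullableColumn {
    std::vector<int> values;
    std::vector<std::uint8_t> null_map;
};

// not nullable -> nullable: keep the values and back-fill an all-zero null map,
// like change_nested_column() followed by resize_fill() in the swap loop.
NullableColumn make_nullable(std::vector<int> values) {
    NullableColumn col;
    col.null_map.assign(values.size(), 0);
    col.values = std::move(values);
    return col;
}

// nullable -> not nullable: take the nested values, like get_nested_column_ptr();
// the caller is expected to have verified that no row is actually null.
std::vector<int> remove_nullable(NullableColumn col) {
    return std::move(col.values);
}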
424 | | |
425 | | // This check can prevent schema-change from causing data loss after type cast |
426 | | Status BlockChanger::_check_cast_valid(vectorized::ColumnPtr input_column, |
427 | 0 | vectorized::ColumnPtr output_column) { |
428 | 0 | if (input_column->size() != output_column->size()) { |
429 | 0 | return Status::InternalError( |
430 | 0 | "column size is changed, input_column_size={}, output_column_size={}; " |
431 | 0 | "input_column={}", |
432 | 0 | input_column->size(), output_column->size(), input_column->get_name()); |
433 | 0 | } |
434 | 0 | DCHECK_EQ(input_column->size(), output_column->size()) |
435 | 0 | << "length check should have done before calling this function!"; |
436 | |
437 | 0 | if (input_column->is_nullable() != output_column->is_nullable()) { |
438 | 0 | if (input_column->is_nullable()) { |
439 | 0 | const auto* ref_null_map = |
440 | 0 | vectorized::check_and_get_column<vectorized::ColumnNullable>(input_column.get()) |
441 | 0 | ->get_null_map_column() |
442 | 0 | .get_data() |
443 | 0 | .data(); |
444 | |
445 | 0 | bool is_changed = false; |
446 | 0 | for (size_t i = 0; i < input_column->size(); i++) { |
447 | 0 | is_changed |= ref_null_map[i]; |
448 | 0 | } |
449 | 0 | if (is_changed) { |
450 | 0 | return Status::DataQualityError( |
451 | 0 | "some null data is changed to not null, input_column={}", |
452 | 0 | input_column->get_name()); |
453 | 0 | } |
454 | 0 | } else { |
455 | 0 | const auto& null_map_column = |
456 | 0 | vectorized::check_and_get_column<vectorized::ColumnNullable>( |
457 | 0 | output_column.get()) |
458 | 0 | ->get_null_map_column(); |
459 | 0 | const auto& nested_column = |
460 | 0 | vectorized::check_and_get_column<vectorized::ColumnNullable>( |
461 | 0 | output_column.get()) |
462 | 0 | ->get_nested_column(); |
463 | 0 | const auto* new_null_map = null_map_column.get_data().data(); |
464 | |
465 | 0 | if (null_map_column.size() != output_column->size()) { |
466 | 0 | return Status::InternalError( |
467 | 0 | "null_map_column size mismatch output_column_size, " |
468 | 0 | "null_map_column_size={}, output_column_size={}; input_column={}", |
469 | 0 | null_map_column.size(), output_column->size(), input_column->get_name()); |
470 | 0 | } |
471 | | |
472 | 0 | if (nested_column.size() != output_column->size()) { |
473 | 0 | return Status::InternalError( |
474 | 0 | "nested_column size is changed, nested_column_size={}, " |
475 | 0 | "output_column_size={}; input_column={}", |
476 | 0 | nested_column.size(), output_column->size(), input_column->get_name()); |
477 | 0 | } |
478 | | |
479 | 0 | bool is_changed = false; |
480 | 0 | for (size_t i = 0; i < input_column->size(); i++) { |
481 | 0 | is_changed |= new_null_map[i]; |
482 | 0 | } |
483 | 0 | if (is_changed) { |
484 | 0 | return Status::DataQualityError( |
485 | 0 | "some not null data is changed to null, input_column={}", |
486 | 0 | input_column->get_name()); |
487 | 0 | } |
488 | 0 | } |
489 | 0 | } |
490 | | |
491 | 0 | if (input_column->is_nullable() && output_column->is_nullable()) { |
492 | 0 | const auto* ref_null_map = |
493 | 0 | vectorized::check_and_get_column<vectorized::ColumnNullable>(input_column.get()) |
494 | 0 | ->get_null_map_column() |
495 | 0 | .get_data() |
496 | 0 | .data(); |
497 | 0 | const auto* new_null_map = |
498 | 0 | vectorized::check_and_get_column<vectorized::ColumnNullable>(output_column.get()) |
499 | 0 | ->get_null_map_column() |
500 | 0 | .get_data() |
501 | 0 | .data(); |
502 | |
503 | 0 | bool is_changed = false; |
504 | 0 | for (size_t i = 0; i < input_column->size(); i++) { |
505 | 0 | is_changed |= (ref_null_map[i] != new_null_map[i]); |
506 | 0 | } |
507 | 0 | if (is_changed) { |
508 | 0 | return Status::DataQualityError( |
509 | 0 | "null map is changed after calculation, input_column={}", |
510 | 0 | input_column->get_name()); |
511 | 0 | } |
512 | 0 | } |
513 | 0 | return Status::OK(); |
514 | 0 | } |
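Reduced to its essence, _check_cast_valid compares the null maps before and after the cast and rejects the conversion if any flag flipped. A compact sketch of that comparison, assuming plain uint8_t null maps (1 = null); the error handling is illustrative, not the Doris Status API:

#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <vector>

// A cast is rejected if it turns any non-null value into null or vice versa;
// that is the guard against silent data loss during schema change.
void check_cast_preserves_nulls(const std::vector<std::uint8_t>& input_null_map,
                                const std::vector<std::uint8_t>& output_null_map) {
    if (input_null_map.size() != output_null_map.size()) {
        throw std::runtime_error("column size changed by cast");
    }
    bool changed = false;
    for (std::size_t i = 0; i < input_null_map.size(); ++i) {
        changed |= (input_null_map[i] != output_null_map[i]);
    }
    if (changed) {
        throw std::runtime_error("null map changed by cast, data would be lost");
    }
}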
515 | | |
516 | | Status LinkedSchemaChange::process(RowsetReaderSharedPtr rowset_reader, RowsetWriter* rowset_writer, |
517 | | BaseTabletSPtr new_tablet, BaseTabletSPtr base_tablet, |
518 | | TabletSchemaSPtr base_tablet_schema, |
519 | 0 | TabletSchemaSPtr new_tablet_schema) { |
520 | 0 | Status status = rowset_writer->add_rowset_for_linked_schema_change(rowset_reader->rowset()); |
521 | 0 | if (!status) { |
522 | 0 | LOG(WARNING) << "fail to convert rowset." |
523 | 0 | << ", new_tablet=" << new_tablet->tablet_id() |
524 | 0 | << ", version=" << rowset_writer->version().first << "-" |
525 | 0 | << rowset_writer->version().second << ", error status " << status; |
526 | 0 | return status; |
527 | 0 | } |
528 | | // copy delete bitmap to new tablet. |
529 | 0 | if (new_tablet->keys_type() == UNIQUE_KEYS && new_tablet->enable_unique_key_merge_on_write()) { |
530 | 0 | DeleteBitmap origin_delete_bitmap(base_tablet->tablet_id()); |
531 | 0 | base_tablet->tablet_meta()->delete_bitmap().subset( |
532 | 0 | {rowset_reader->rowset()->rowset_id(), 0, 0}, |
533 | 0 | {rowset_reader->rowset()->rowset_id(), UINT32_MAX, INT64_MAX}, |
534 | 0 | &origin_delete_bitmap); |
535 | 0 | for (auto& iter : origin_delete_bitmap.delete_bitmap) { |
536 | 0 | int ret = new_tablet->tablet_meta()->delete_bitmap().set( |
537 | 0 | {rowset_writer->rowset_id(), std::get<1>(iter.first), std::get<2>(iter.first)}, |
538 | 0 | iter.second); |
539 | 0 | DCHECK(ret == 1); |
540 | 0 | } |
541 | 0 | } |
542 | 0 | return Status::OK(); |
543 | 0 | } |
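The delete-bitmap copy above subsets the base tablet's bitmap by the source rowset id and re-inserts each entry under the new rowset id, keeping the segment id and version parts of the key. An illustrative model of that re-keying with stand-in types (not the Doris DeleteBitmap API):

#include <cstdint>
#include <map>
#include <set>
#include <string>
#include <tuple>

// Entries are keyed by (rowset_id, segment_id, version); the linked schema change
// keeps the segment ids and versions but re-keys every entry under the new rowset id.
using BitmapKey = std::tuple<std::string, std::uint32_t, std::int64_t>;
using DeleteBitmapMap = std::map<BitmapKey, std::set<std::uint32_t>>;

DeleteBitmapMap copy_under_new_rowset(const DeleteBitmapMap& origin,
                                      const std::string& new_rowset_id) {
    DeleteBitmapMap copied;
    for (const auto& [key, row_ids] : origin) {
        copied[BitmapKey{new_rowset_id, std::get<1>(key), std::get<2>(key)}] = row_ids;
    }
    return copied;
}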
544 | | |
545 | | Status VSchemaChangeDirectly::_inner_process(RowsetReaderSharedPtr rowset_reader, |
546 | | RowsetWriter* rowset_writer, BaseTabletSPtr new_tablet, |
547 | | TabletSchemaSPtr base_tablet_schema, |
548 | 0 | TabletSchemaSPtr new_tablet_schema) { |
549 | 0 | bool eof = false; |
550 | 0 | do { |
551 | 0 | auto new_block = vectorized::Block::create_unique(new_tablet_schema->create_block()); |
552 | 0 | auto ref_block = vectorized::Block::create_unique(base_tablet_schema->create_block()); |
553 | |
554 | 0 | auto st = rowset_reader->next_block(ref_block.get()); |
555 | 0 | if (!st) { |
556 | 0 | if (st.is<ErrorCode::END_OF_FILE>()) { |
557 | 0 | if (ref_block->rows() == 0) { |
558 | 0 | break; |
559 | 0 | } else { |
560 | 0 | eof = true; |
561 | 0 | } |
562 | 0 | } else { |
563 | 0 | return st; |
564 | 0 | } |
565 | 0 | } |
566 | | |
567 | 0 | RETURN_IF_ERROR(_changer.change_block(ref_block.get(), new_block.get())); |
568 | 0 | RETURN_IF_ERROR(rowset_writer->add_block(new_block.get())); |
569 | 0 | } while (!eof); |
570 | | |
571 | 0 | RETURN_IF_ERROR(rowset_writer->flush()); |
572 | 0 | return Status::OK(); |
573 | 0 | } |
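The read loop above has one subtlety: an END_OF_FILE status can still carry a final, partially filled block, which must be converted and written before the loop exits. A generic sketch of that loop shape with illustrative types:

#include <algorithm>
#include <cstddef>
#include <vector>

// A toy reader: next_batch() returns false on EOF but may still hand back a
// final, partially filled batch, just like rowset_reader->next_block() above.
struct VectorReader {
    const std::vector<int>& data;
    std::size_t pos = 0;

    bool next_batch(std::vector<int>* batch, std::size_t batch_size) {
        std::size_t end = std::min(pos + batch_size, data.size());
        batch->assign(data.begin() + pos, data.begin() + end);
        pos = end;
        return pos < data.size();  // false models END_OF_FILE
    }
};

// The loop consumes the trailing batch instead of breaking immediately on EOF;
// it only stops early when EOF arrives with an empty batch.
template <typename Consumer>
void drain(VectorReader& reader, std::size_t batch_size, Consumer&& consume) {
    bool eof = false;
    do {
        std::vector<int> batch;
        eof = !reader.next_batch(&batch, batch_size);
        if (eof && batch.empty()) {
            break;
        }
        consume(batch);  // stands in for change_block() + add_block()
    } while (!eof);
}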
574 | | |
575 | | VBaseSchemaChangeWithSorting::VBaseSchemaChangeWithSorting(const BlockChanger& changer, |
576 | | size_t memory_limitation) |
577 | | : _changer(changer), |
578 | | _memory_limitation(memory_limitation), |
579 | 0 | _temp_delta_versions(Version::mock()) { |
580 | 0 | _mem_tracker = std::make_unique<MemTracker>( |
581 | 0 | fmt::format("VSchemaChangeWithSorting:changer={}", std::to_string(int64(&changer)))); |
582 | 0 | } |
583 | | |
584 | | Status VBaseSchemaChangeWithSorting::_inner_process(RowsetReaderSharedPtr rowset_reader, |
585 | | RowsetWriter* rowset_writer, |
586 | | BaseTabletSPtr new_tablet, |
587 | | TabletSchemaSPtr base_tablet_schema, |
588 | 0 | TabletSchemaSPtr new_tablet_schema) { |
589 | | // for internal sorting |
590 | 0 | std::vector<std::unique_ptr<vectorized::Block>> blocks; |
591 | |
592 | 0 | RowsetSharedPtr rowset = rowset_reader->rowset(); |
593 | 0 | SegmentsOverlapPB segments_overlap = rowset->rowset_meta()->segments_overlap(); |
594 | 0 | int64_t newest_write_timestamp = rowset->newest_write_timestamp(); |
595 | 0 | _temp_delta_versions.first = _temp_delta_versions.second; |
596 | 0 | _src_rowsets.clear(); // init _src_rowsets |
597 | 0 | auto create_rowset = [&]() -> Status { |
598 | 0 | if (blocks.empty()) { |
599 | 0 | return Status::OK(); |
600 | 0 | } |
601 | | |
602 | 0 | auto rowset = DORIS_TRY(_internal_sorting( |
603 | 0 | blocks, Version(_temp_delta_versions.second, _temp_delta_versions.second + 1), |
604 | 0 | newest_write_timestamp, new_tablet, BETA_ROWSET, segments_overlap, |
605 | 0 | new_tablet_schema)); |
606 | 0 | _src_rowsets.push_back(std::move(rowset)); |
607 | 0 | for (auto& block : blocks) { |
608 | 0 | _mem_tracker->release(block->allocated_bytes()); |
609 | 0 | } |
610 | 0 | blocks.clear(); |
611 | | |
612 | | // increase temp version |
613 | 0 | _temp_delta_versions.second += 2; |
614 | 0 | return Status::OK(); |
615 | 0 | }; |
616 | |
617 | 0 | auto new_block = vectorized::Block::create_unique(new_tablet_schema->create_block()); |
618 | |
619 | 0 | bool eof = false; |
620 | 0 | do { |
621 | 0 | auto ref_block = vectorized::Block::create_unique(base_tablet_schema->create_block()); |
622 | 0 | auto st = rowset_reader->next_block(ref_block.get()); |
623 | 0 | if (!st) { |
624 | 0 | if (st.is<ErrorCode::END_OF_FILE>()) { |
625 | 0 | if (ref_block->rows() == 0) { |
626 | 0 | break; |
627 | 0 | } else { |
628 | 0 | eof = true; |
629 | 0 | } |
630 | 0 | } else { |
631 | 0 | return st; |
632 | 0 | } |
633 | 0 | } |
634 | | |
635 | 0 | RETURN_IF_ERROR(_changer.change_block(ref_block.get(), new_block.get())); |
636 | | |
637 | 0 | constexpr double HOLD_BLOCK_MEMORY_RATE = |
638 | 0 | 0.66; // Reserve some memory for use by other parts of this job |
639 | 0 | if (_mem_tracker->consumption() + new_block->allocated_bytes() > _memory_limitation || |
640 | 0 | _mem_tracker->consumption() > _memory_limitation * HOLD_BLOCK_MEMORY_RATE || |
641 | 0 | DebugPoints::instance()->is_enable( |
642 | 0 | "VBaseSchemaChangeWithSorting._inner_process.create_rowset")) { |
643 | 0 | RETURN_IF_ERROR(create_rowset()); |
644 | | |
645 | 0 | if (_mem_tracker->consumption() + new_block->allocated_bytes() > _memory_limitation) { |
646 | 0 | return Status::Error<INVALID_ARGUMENT>( |
647 | 0 | "Memory limitation is too small for Schema Change. _memory_limitation={}, " |
648 | 0 | "new_block->allocated_bytes()={}, consumption={}", |
649 | 0 | _memory_limitation, new_block->allocated_bytes(), |
650 | 0 | _mem_tracker->consumption()); |
651 | 0 | } |
652 | 0 | } |
653 | 0 | _mem_tracker->consume(new_block->allocated_bytes()); |
654 | | |
655 | | // move unique ptr |
656 | 0 | blocks.push_back(vectorized::Block::create_unique(new_tablet_schema->create_block())); |
657 | 0 | swap(blocks.back(), new_block); |
658 | 0 | } while (!eof); |
659 | | |
660 | 0 | RETURN_IF_ERROR(create_rowset()); |
661 | | |
662 | 0 | if (_src_rowsets.empty()) { |
663 | 0 | RETURN_IF_ERROR(rowset_writer->flush()); |
664 | 0 | } else { |
665 | 0 | RETURN_IF_ERROR( |
666 | 0 | _external_sorting(_src_rowsets, rowset_writer, new_tablet, new_tablet_schema)); |
667 | 0 | } |
668 | | |
669 | 0 | return Status::OK(); |
670 | 0 | } |
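The sorting path above buffers converted blocks under a memory budget and flushes them as an intermediate sorted run (create_rowset) whenever the tracked bytes would exceed the limit; external sorting later merges the runs. A simplified sketch of that buffer-then-spill control flow (types and byte accounting are illustrative):

#include <cstddef>
#include <utility>
#include <vector>

// Blocks accumulate until the tracked bytes would exceed the budget, then the
// buffered blocks are flushed as one intermediate run, mirroring create_rowset()
// before the external sort merges all runs.
struct SpillBuffer {
    std::size_t budget_bytes;
    std::size_t buffered_bytes = 0;
    std::vector<std::vector<int>> blocks;             // blocks waiting to be flushed
    std::vector<std::vector<std::vector<int>>> runs;  // flushed intermediate runs

    void add_block(std::vector<int> block) {
        std::size_t bytes = block.size() * sizeof(int);
        if (buffered_bytes + bytes > budget_bytes) {
            flush();
        }
        buffered_bytes += bytes;
        blocks.push_back(std::move(block));
    }

    void flush() {
        if (blocks.empty()) {
            return;
        }
        runs.push_back(std::move(blocks));  // a real run would be sorted and written out
        blocks.clear();
        buffered_bytes = 0;
    }
};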
671 | | |
672 | | Result<RowsetSharedPtr> VBaseSchemaChangeWithSorting::_internal_sorting( |
673 | | const std::vector<std::unique_ptr<vectorized::Block>>& blocks, const Version& version, |
674 | | int64_t newest_write_timestamp, BaseTabletSPtr new_tablet, RowsetTypePB new_rowset_type, |
675 | 0 | SegmentsOverlapPB segments_overlap, TabletSchemaSPtr new_tablet_schema) { |
676 | 0 | uint64_t merged_rows = 0; |
677 | 0 | MultiBlockMerger merger(new_tablet); |
678 | 0 | RowsetWriterContext context; |
679 | 0 | context.version = version; |
680 | 0 | context.rowset_state = VISIBLE; |
681 | 0 | context.segments_overlap = segments_overlap; |
682 | 0 | context.tablet_schema = new_tablet_schema; |
683 | 0 | context.newest_write_timestamp = newest_write_timestamp; |
684 | 0 | context.write_type = DataWriteType::TYPE_SCHEMA_CHANGE; |
685 | 0 | std::unique_ptr<RowsetWriter> rowset_writer; |
686 | | // TODO(plat1ko): Use monad op |
687 | 0 | if (auto result = new_tablet->create_rowset_writer(context, false); !result.has_value()) |
688 | 0 | [[unlikely]] { |
689 | 0 | return unexpected(std::move(result).error()); |
690 | 0 | } else { |
691 | 0 | rowset_writer = std::move(result).value(); |
692 | 0 | } |
693 | 0 | RETURN_IF_ERROR_RESULT(merger.merge(blocks, rowset_writer.get(), &merged_rows)); |
694 | 0 | _add_merged_rows(merged_rows); |
695 | 0 | RowsetSharedPtr rowset; |
696 | 0 | RETURN_IF_ERROR_RESULT(rowset_writer->build(rowset)); |
697 | 0 | return rowset; |
698 | 0 | } |
699 | | |
700 | | Result<RowsetSharedPtr> VLocalSchemaChangeWithSorting::_internal_sorting( |
701 | | const std::vector<std::unique_ptr<vectorized::Block>>& blocks, const Version& version, |
702 | | int64_t newest_write_timestamp, BaseTabletSPtr new_tablet, RowsetTypePB new_rowset_type, |
703 | 0 | SegmentsOverlapPB segments_overlap, TabletSchemaSPtr new_tablet_schema) { |
704 | 0 | uint64_t merged_rows = 0; |
705 | 0 | MultiBlockMerger merger(new_tablet); |
706 | 0 | RowsetWriterContext context; |
707 | 0 | context.version = version; |
708 | 0 | context.rowset_state = VISIBLE; |
709 | 0 | context.segments_overlap = segments_overlap; |
710 | 0 | context.tablet_schema = new_tablet_schema; |
711 | 0 | context.newest_write_timestamp = newest_write_timestamp; |
712 | 0 | context.write_type = DataWriteType::TYPE_SCHEMA_CHANGE; |
713 | 0 | std::unique_ptr<RowsetWriter> rowset_writer; |
714 | | // TODO(plat1ko): Use monad op |
715 | 0 | if (auto result = new_tablet->create_rowset_writer(context, false); !result.has_value()) |
716 | 0 | [[unlikely]] { |
717 | 0 | return unexpected(std::move(result).error()); |
718 | 0 | } else { |
719 | 0 | rowset_writer = std::move(result).value(); |
720 | 0 | } |
721 | 0 | auto guard = _local_storage_engine.pending_local_rowsets().add(context.rowset_id); |
722 | 0 | _pending_rs_guards.push_back(std::move(guard)); |
723 | 0 | RETURN_IF_ERROR_RESULT(merger.merge(blocks, rowset_writer.get(), &merged_rows)); |
724 | 0 | _add_merged_rows(merged_rows); |
725 | 0 | RowsetSharedPtr rowset; |
726 | 0 | RETURN_IF_ERROR_RESULT(rowset_writer->build(rowset)); |
727 | 0 | return rowset; |
728 | 0 | } |
729 | | |
730 | | Status VBaseSchemaChangeWithSorting::_external_sorting(vector<RowsetSharedPtr>& src_rowsets, |
731 | | RowsetWriter* rowset_writer, |
732 | | BaseTabletSPtr new_tablet, |
733 | 0 | TabletSchemaSPtr new_tablet_schema) { |
734 | 0 | std::vector<RowsetReaderSharedPtr> rs_readers; |
735 | 0 | for (auto& rowset : src_rowsets) { |
736 | 0 | RowsetReaderSharedPtr rs_reader; |
737 | 0 | RETURN_IF_ERROR(rowset->create_reader(&rs_reader)); |
738 | 0 | rs_readers.push_back(rs_reader); |
739 | 0 | } |
740 | | |
741 | 0 | Merger::Statistics stats; |
742 | 0 | if (!new_tablet_schema->cluster_key_uids().empty()) { |
743 | | // schema change read rowsets with delete bitmap, so there should be no duplicated keys |
744 | | // RETURN_IF_ERROR(Compaction::update_delete_bitmap()); |
745 | 0 | int64_t way_num = 0; |
746 | 0 | int64_t input_rowsets_data_size = 0; |
747 | 0 | int64_t input_row_num = 0; |
748 | 0 | for (auto& rowset : src_rowsets) { |
749 | 0 | way_num += rowset->rowset_meta()->get_merge_way_num(); |
750 | 0 | input_rowsets_data_size += rowset->data_disk_size(); |
751 | 0 | input_row_num += rowset->num_rows(); |
752 | 0 | } |
753 | 0 | int64_t avg_segment_rows = config::vertical_compaction_max_segment_size / |
754 | 0 | (input_rowsets_data_size / (input_row_num + 1) + 1); |
755 | 0 | RETURN_IF_ERROR(Merger::vertical_merge_rowsets( |
756 | 0 | new_tablet, ReaderType::READER_ALTER_TABLE, *new_tablet_schema, rs_readers, |
757 | 0 | rowset_writer, avg_segment_rows, way_num, &stats)); |
758 | 0 | } else { |
759 | 0 | RETURN_IF_ERROR(Merger::vmerge_rowsets(new_tablet, ReaderType::READER_ALTER_TABLE, |
760 | 0 | *new_tablet_schema, rs_readers, rowset_writer, |
761 | 0 | &stats)); |
762 | 0 | } |
763 | 0 | _add_merged_rows(stats.merged_rows); |
764 | 0 | _add_filtered_rows(stats.filtered_rows); |
765 | 0 | return Status::OK(); |
766 | 0 | } |
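For the cluster-key path, the average segment row count passed to the vertical merge is derived from the inputs as max_segment_size / bytes_per_row, with "+ 1" terms guarding against division by zero. The same arithmetic in isolation (function name is illustrative):

#include <cstdint>

// bytes-per-row is estimated from the input rowsets; the "+ 1" terms guard
// against division by zero for empty or tiny inputs.
std::int64_t estimate_avg_segment_rows(std::int64_t max_segment_size_bytes,
                                       std::int64_t input_data_size_bytes,
                                       std::int64_t input_row_num) {
    std::int64_t bytes_per_row = input_data_size_bytes / (input_row_num + 1) + 1;
    return max_segment_size_bytes / bytes_per_row;
}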
767 | | |
768 | | Status VLocalSchemaChangeWithSorting::_inner_process(RowsetReaderSharedPtr rowset_reader, |
769 | | RowsetWriter* rowset_writer, |
770 | | BaseTabletSPtr new_tablet, |
771 | | TabletSchemaSPtr base_tablet_schema, |
772 | 0 | TabletSchemaSPtr new_tablet_schema) { |
773 | 0 | Defer defer {[&]() { |
774 | | // remove the intermediate rowsets generated by internal sorting |
775 | 0 | for (auto& row_set : _src_rowsets) { |
776 | 0 | _local_storage_engine.add_unused_rowset(row_set); |
777 | 0 | } |
778 | 0 | }}; |
779 | 0 | _pending_rs_guards.clear(); |
780 | 0 | return VBaseSchemaChangeWithSorting::_inner_process(rowset_reader, rowset_writer, new_tablet, |
781 | 0 | base_tablet_schema, new_tablet_schema); |
782 | 0 | } |
783 | | |
784 | 0 | Status SchemaChangeJob::process_alter_tablet(const TAlterTabletReqV2& request) { |
785 | 0 | if (!request.__isset.desc_tbl) { |
786 | 0 | return Status::Error<INVALID_ARGUMENT>( |
787 | 0 | "desc_tbl is not set. Maybe the FE version is not equal to the BE " |
788 | 0 | "version."); |
789 | 0 | } |
790 | 0 | if (_base_tablet == nullptr) { |
791 | 0 | return Status::Error<TABLE_NOT_FOUND>("fail to find base tablet. base_tablet={}", |
792 | 0 | request.base_tablet_id); |
793 | 0 | } |
794 | 0 | if (_new_tablet == nullptr) { |
795 | 0 | return Status::Error<TABLE_NOT_FOUND>("fail to find new tablet. new_tablet={}", |
796 | 0 | request.new_tablet_id); |
797 | 0 | } |
798 | | |
799 | 0 | LOG(INFO) << "begin to do request alter tablet: base_tablet_id=" << request.base_tablet_id |
800 | 0 | << ", new_tablet_id=" << request.new_tablet_id |
801 | 0 | << ", alter_version=" << request.alter_version; |
802 | | |
803 | | // Hold schema_change_lock until the schema change info is stored in the tablet header |
804 | 0 | static constexpr long TRY_LOCK_TIMEOUT = 30; |
805 | 0 | std::unique_lock schema_change_lock(_base_tablet->get_schema_change_lock(), std::defer_lock); |
806 | 0 | bool owns_lock = schema_change_lock.try_lock_for(std::chrono::seconds(TRY_LOCK_TIMEOUT)); |
807 | |
808 | 0 | if (!owns_lock) { |
809 | 0 | return Status::Error<TRY_LOCK_FAILED>( |
810 | 0 | "Failed to obtain schema change lock, there might be inverted index being " |
811 | 0 | "built or cooldown running on base_tablet={}", |
812 | 0 | request.base_tablet_id); |
813 | 0 | } |
814 | | |
815 | 0 | Status res = _do_process_alter_tablet(request); |
816 | 0 | LOG(INFO) << "finished alter tablet process, res=" << res; |
817 | 0 | DBUG_EXECUTE_IF("SchemaChangeJob::process_alter_tablet.leave.sleep", { sleep(5); }); |
818 | 0 | return res; |
819 | 0 | } |
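The lock acquisition above uses try_lock_for with a 30-second timeout so the job can fail fast with TRY_LOCK_FAILED instead of blocking behind an inverted index build or cooldown. The same pattern in isolation on a std::timed_mutex (a sketch, not the Doris lock type):

#include <chrono>
#include <mutex>
#include <utility>

// try_lock_for() with a timeout lets the caller fail fast instead of blocking
// behind another job that already holds the schema-change lock.
bool try_schema_change_lock(std::timed_mutex& schema_change_mutex,
                            std::unique_lock<std::timed_mutex>& out_lock) {
    std::unique_lock<std::timed_mutex> lock(schema_change_mutex, std::defer_lock);
    if (!lock.try_lock_for(std::chrono::seconds(30))) {
        return false;  // caller reports TRY_LOCK_FAILED
    }
    out_lock = std::move(lock);
    return true;
}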
820 | | |
821 | | SchemaChangeJob::SchemaChangeJob(StorageEngine& local_storage_engine, |
822 | | const TAlterTabletReqV2& request, const std::string& job_id) |
823 | 0 | : _local_storage_engine(local_storage_engine) { |
824 | 0 | _base_tablet = _local_storage_engine.tablet_manager()->get_tablet(request.base_tablet_id); |
825 | 0 | _new_tablet = _local_storage_engine.tablet_manager()->get_tablet(request.new_tablet_id); |
826 | 0 | if (_base_tablet && _new_tablet) { |
827 | 0 | _base_tablet_schema = std::make_shared<TabletSchema>(); |
828 | 0 | _base_tablet_schema->update_tablet_columns(*_base_tablet->tablet_schema(), request.columns); |
829 | | // The request only includes column info; it does not include bitmap or bloom filter index info, |
830 | | // so we also need to copy the index info from the real base tablet |
831 | 0 | _base_tablet_schema->update_index_info_from(*_base_tablet->tablet_schema()); |
832 | | // During a schema change, the extracted columns of a variant should not be included in the tablet schema. |
833 | | // This is because the schema change for a variant needs to ignore the extracted columns. |
834 | | // Otherwise, the schema types in different rowsets might be inconsistent. When performing a schema change, |
835 | | // the complete variant is constructed by reading all the sub-columns of the variant. |
836 | 0 | _new_tablet_schema = _new_tablet->tablet_schema()->copy_without_variant_extracted_columns(); |
837 | 0 | } |
838 | 0 | _job_id = job_id; |
839 | 0 | } |
840 | | |
841 | | // In the past, schema change and rollup would create the new tablet and then wait for running txns to finish before the task completed. |
842 | | // That wait cost a lot of time and made the task hard to understand. |
843 | | // In alter task v2, FE calls BE to create the tablet and then sends an alter task to BE to convert the historical data. |
844 | | // The admin should upgrade all BEs and then upgrade the FE. |
845 | | // The old code should be deleted after the upgrade is finished. |
846 | 0 | Status SchemaChangeJob::_do_process_alter_tablet(const TAlterTabletReqV2& request) { |
847 | 0 | DBUG_EXECUTE_IF("SchemaChangeJob._do_process_alter_tablet.sleep", { sleep(10); }) |
848 | 0 | Status res; |
849 | 0 | signal::tablet_id = _base_tablet->get_table_id(); |
850 | | |
851 | | // check whether the tablet's state is NOT_READY; if it is already ready, the convert job has finished, |
852 | | // so only check whether the tablet's max continuous version == request.version |
853 | 0 | if (_new_tablet->tablet_state() != TABLET_NOTREADY) { |
854 | 0 | res = _validate_alter_result(request); |
855 | 0 | LOG(INFO) << "tablet's state=" << _new_tablet->tablet_state() |
856 | 0 | << " the convert job already finished, check its version" |
857 | 0 | << " res=" << res; |
858 | 0 | return res; |
859 | 0 | } |
860 | 0 | _new_tablet->set_alter_failed(false); |
861 | 0 | Defer defer([this] { |
862 | | // if the tablet state is not TABLET_RUNNING on return, the alter has failed. |
863 | 0 | if (_new_tablet->tablet_state() != TABLET_RUNNING) { |
864 | 0 | _new_tablet->set_alter_failed(true); |
865 | 0 | } |
866 | 0 | }); |
867 | |
868 | 0 | LOG(INFO) << "finish to validate alter tablet request. begin to convert data from base tablet " |
869 | 0 | "to new tablet" |
870 | 0 | << " base_tablet=" << _base_tablet->tablet_id() |
871 | 0 | << " new_tablet=" << _new_tablet->tablet_id(); |
872 | |
873 | 0 | std::shared_lock base_migration_rlock(_base_tablet->get_migration_lock(), std::try_to_lock); |
874 | 0 | if (!base_migration_rlock.owns_lock()) { |
875 | 0 | return Status::Error<TRY_LOCK_FAILED>( |
876 | 0 | "SchemaChangeJob::_do_process_alter_tablet get lock failed"); |
877 | 0 | } |
878 | 0 | std::shared_lock new_migration_rlock(_new_tablet->get_migration_lock(), std::try_to_lock); |
879 | 0 | if (!new_migration_rlock.owns_lock()) { |
880 | 0 | return Status::Error<TRY_LOCK_FAILED>( |
881 | 0 | "SchemaChangeJob::_do_process_alter_tablet get lock failed"); |
882 | 0 | } |
883 | | |
884 | 0 | std::vector<Version> versions_to_be_changed; |
885 | 0 | int64_t end_version = -1; |
886 | | // reader_context is a stack variable; its lifetime should be the same |
887 | | // as rs_readers |
888 | 0 | RowsetReaderContext reader_context; |
889 | 0 | std::vector<RowSetSplits> rs_splits; |
890 | | // delete handlers for new tablet |
891 | 0 | DeleteHandler delete_handler; |
892 | 0 | std::vector<ColumnId> return_columns; |
893 | | |
894 | | // Use the tablet schema directly from the base tablet: it is the newest schema and does not contain |
895 | | // columns dropped during light weight schema change. |
896 | | // But the tablet schema in the base tablet may not be the latest from the FE, so if the FE passes through |
897 | | // a tablet schema, use the request schema instead. |
898 | 0 | size_t num_cols = |
899 | 0 | request.columns.empty() ? _base_tablet_schema->num_columns() : request.columns.size(); |
900 | 0 | return_columns.resize(num_cols); |
901 | 0 | for (int i = 0; i < num_cols; ++i) { |
902 | 0 | return_columns[i] = i; |
903 | 0 | } |
904 | 0 | std::vector<uint32_t> cluster_key_idxes; |
905 | |
906 | 0 | DBUG_EXECUTE_IF("SchemaChangeJob::_do_process_alter_tablet.block", DBUG_BLOCK); |
907 | | |
908 | | // begin to find the deltas to convert from the base tablet to the new tablet; |
909 | | // take both tablets' push locks and header write locks to prevent concurrent data loading |
910 | 0 | { |
911 | 0 | std::lock_guard base_tablet_lock(_base_tablet->get_push_lock()); |
912 | 0 | std::lock_guard new_tablet_lock(_new_tablet->get_push_lock()); |
913 | 0 | std::lock_guard base_tablet_wlock(_base_tablet->get_header_lock()); |
914 | 0 | SCOPED_SIMPLE_TRACE_IF_TIMEOUT(TRACE_TABLET_LOCK_THRESHOLD); |
915 | 0 | std::lock_guard<std::shared_mutex> new_tablet_wlock(_new_tablet->get_header_lock()); |
916 | |
917 | 0 | do { |
918 | 0 | RowsetSharedPtr max_rowset; |
919 | | // get the history data to be converted; this also checks whether there is a version hole in the base tablet |
920 | 0 | res = _get_versions_to_be_changed(&versions_to_be_changed, &max_rowset); |
921 | 0 | if (!res) { |
922 | 0 | LOG(WARNING) << "fail to get version to be changed. res=" << res; |
923 | 0 | break; |
924 | 0 | } |
925 | | |
926 | 0 | DBUG_EXECUTE_IF("SchemaChangeJob.process_alter_tablet.alter_fail", { |
927 | 0 | res = Status::InternalError( |
928 | 0 | "inject alter tablet failed. base_tablet={}, new_tablet={}", |
929 | 0 | request.base_tablet_id, request.new_tablet_id); |
930 | 0 | LOG(WARNING) << "inject error. res=" << res; |
931 | 0 | break; |
932 | 0 | }); |
933 | | |
934 | | // the max_version must be >= request.alter_version; otherwise the convert is useless |
935 | 0 | if (max_rowset == nullptr || max_rowset->end_version() < request.alter_version) { |
936 | 0 | res = Status::InternalError( |
937 | 0 | "base tablet's max version={} is less than request version={}", |
938 | 0 | (max_rowset == nullptr ? 0 : max_rowset->end_version()), |
939 | 0 | request.alter_version); |
940 | 0 | break; |
941 | 0 | } |
942 | | // before calculating versions_to_be_changed, |
943 | | // remove all data from the new tablet to prevent rewriting data (rows double-pushed while waiting) |
944 | 0 | LOG(INFO) << "begin to remove all data before end version from new tablet to prevent " |
945 | 0 | "rewrite." |
946 | 0 | << " new_tablet=" << _new_tablet->tablet_id() |
947 | 0 | << ", end_version=" << max_rowset->end_version(); |
948 | 0 | std::vector<RowsetSharedPtr> rowsets_to_delete; |
949 | 0 | std::vector<std::pair<Version, RowsetSharedPtr>> version_rowsets; |
950 | 0 | _new_tablet->acquire_version_and_rowsets(&version_rowsets); |
951 | 0 | std::sort(version_rowsets.begin(), version_rowsets.end(), |
952 | 0 | [](const std::pair<Version, RowsetSharedPtr>& l, |
953 | 0 | const std::pair<Version, RowsetSharedPtr>& r) { |
954 | 0 | return l.first.first < r.first.first; |
955 | 0 | }); |
956 | 0 | for (auto& pair : version_rowsets) { |
957 | 0 | if (pair.first.second <= max_rowset->end_version()) { |
958 | 0 | rowsets_to_delete.push_back(pair.second); |
959 | 0 | } else if (pair.first.first <= max_rowset->end_version()) { |
960 | | // If the max version is [X-10] and the new tablet has versions [7-9][10-12], |
961 | | // we can only remove [7-9] from the new tablet. If we added [X-10] to the new tablet, it would have a version |
962 | | // cross: [X-10] [10-12]. |
963 | | // So we should return OLAP_ERR_VERSION_ALREADY_MERGED to fail fast. |
964 | 0 | return Status::Error<VERSION_ALREADY_MERGED>( |
965 | 0 | "New tablet has a version {} crossing base tablet's max_version={}", |
966 | 0 | pair.first.to_string(), max_rowset->end_version()); |
967 | 0 | } |
968 | 0 | } |
969 | 0 | std::vector<RowsetSharedPtr> empty_vec; |
970 | 0 | RETURN_IF_ERROR(_new_tablet->delete_rowsets(rowsets_to_delete, false)); |
971 | | // inherit cumulative_layer_point from base_tablet |
972 | | // check if new_tablet.ce_point > base_tablet.ce_point? |
973 | 0 | _new_tablet->set_cumulative_layer_point(-1); |
974 | | // save tablet meta |
975 | 0 | _new_tablet->save_meta(); |
976 | 0 | for (auto& rowset : rowsets_to_delete) { |
977 | | // do not call rowset.remove directly; let the GC thread delete it |
978 | 0 | _local_storage_engine.add_unused_rowset(rowset); |
979 | 0 | } |
980 | | |
981 | | // init one delete handler |
982 | 0 | for (auto& version : versions_to_be_changed) { |
983 | 0 | end_version = std::max(end_version, version.second); |
984 | 0 | } |
985 | | |
986 | | // acquire data sources correspond to history versions |
987 | 0 | RETURN_IF_ERROR( |
988 | 0 | _base_tablet->capture_rs_readers_unlocked(versions_to_be_changed, &rs_splits)); |
989 | 0 | if (rs_splits.empty()) { |
990 | 0 | res = Status::Error<ALTER_DELTA_DOES_NOT_EXISTS>( |
991 | 0 | "fail to acquire all data sources. version_num={}, data_source_num={}", |
992 | 0 | versions_to_be_changed.size(), rs_splits.size()); |
993 | 0 | break; |
994 | 0 | } |
995 | 0 | std::vector<RowsetMetaSharedPtr> del_preds; |
996 | 0 | for (auto&& split : rs_splits) { |
997 | 0 | const auto& rs_meta = split.rs_reader->rowset()->rowset_meta(); |
998 | 0 | if (!rs_meta->has_delete_predicate() || rs_meta->start_version() > end_version) { |
999 | 0 | continue; |
1000 | 0 | } |
1001 | 0 | _base_tablet_schema->merge_dropped_columns(*rs_meta->tablet_schema()); |
1002 | 0 | del_preds.push_back(rs_meta); |
1003 | 0 | } |
1004 | 0 | res = delete_handler.init(_base_tablet_schema, del_preds, end_version); |
1005 | 0 | if (!res) { |
1006 | 0 | LOG(WARNING) << "init delete handler failed. base_tablet=" |
1007 | 0 | << _base_tablet->tablet_id() << ", end_version=" << end_version; |
1008 | 0 | break; |
1009 | 0 | } |
1010 | | |
1011 | 0 | reader_context.reader_type = ReaderType::READER_ALTER_TABLE; |
1012 | 0 | reader_context.tablet_schema = _base_tablet_schema; |
1013 | 0 | reader_context.need_ordered_result = true; |
1014 | 0 | reader_context.delete_handler = &delete_handler; |
1015 | 0 | reader_context.return_columns = &return_columns; |
1016 | 0 | reader_context.sequence_id_idx = reader_context.tablet_schema->sequence_col_idx(); |
1017 | 0 | reader_context.is_unique = _base_tablet->keys_type() == UNIQUE_KEYS; |
1018 | 0 | reader_context.batch_size = ALTER_TABLE_BATCH_SIZE; |
1019 | 0 | reader_context.delete_bitmap = &_base_tablet->tablet_meta()->delete_bitmap(); |
1020 | 0 | reader_context.version = Version(0, end_version); |
1021 | 0 | if (!_base_tablet_schema->cluster_key_uids().empty()) { |
1022 | 0 | for (const auto& uid : _base_tablet_schema->cluster_key_uids()) { |
1023 | 0 | cluster_key_idxes.emplace_back(_base_tablet_schema->field_index(uid)); |
1024 | 0 | } |
1025 | 0 | reader_context.read_orderby_key_columns = &cluster_key_idxes; |
1026 | 0 | reader_context.is_unique = false; |
1027 | 0 | reader_context.sequence_id_idx = -1; |
1028 | 0 | } |
1029 | 0 | for (auto& rs_split : rs_splits) { |
1030 | 0 | res = rs_split.rs_reader->init(&reader_context); |
1031 | 0 | if (!res) { |
1032 | 0 | LOG(WARNING) << "failed to init rowset reader: " << _base_tablet->tablet_id(); |
1033 | 0 | break; |
1034 | 0 | } |
1035 | 0 | } |
1036 | 0 | } while (false); |
1037 | 0 | } |
1038 | | |
1039 | 0 | do { |
1040 | 0 | if (!res) { |
1041 | 0 | break; |
1042 | 0 | } |
1043 | 0 | SchemaChangeParams sc_params; |
1044 | |
1045 | 0 | RETURN_IF_ERROR( |
1046 | 0 | DescriptorTbl::create(&sc_params.pool, request.desc_tbl, &sc_params.desc_tbl)); |
1047 | 0 | sc_params.ref_rowset_readers.reserve(rs_splits.size()); |
1048 | 0 | for (RowSetSplits& split : rs_splits) { |
1049 | 0 | sc_params.ref_rowset_readers.emplace_back(split.rs_reader); |
1050 | 0 | } |
1051 | 0 | sc_params.delete_handler = &delete_handler; |
1052 | 0 | sc_params.be_exec_version = request.be_exec_version; |
1053 | 0 | DCHECK(request.__isset.alter_tablet_type); |
1054 | 0 | switch (request.alter_tablet_type) { |
1055 | 0 | case TAlterTabletType::SCHEMA_CHANGE: |
1056 | 0 | sc_params.alter_tablet_type = AlterTabletType::SCHEMA_CHANGE; |
1057 | 0 | break; |
1058 | 0 | case TAlterTabletType::ROLLUP: |
1059 | 0 | sc_params.alter_tablet_type = AlterTabletType::ROLLUP; |
1060 | 0 | break; |
1061 | 0 | case TAlterTabletType::MIGRATION: |
1062 | 0 | sc_params.alter_tablet_type = AlterTabletType::MIGRATION; |
1063 | 0 | break; |
1064 | 0 | } |
1065 | 0 | if (request.__isset.materialized_view_params) { |
1066 | 0 | for (auto item : request.materialized_view_params) { |
1067 | 0 | AlterMaterializedViewParam mv_param; |
1068 | 0 | mv_param.column_name = item.column_name; |
1069 | |
1070 | 0 | if (item.__isset.mv_expr) { |
1071 | 0 | mv_param.expr = std::make_shared<TExpr>(item.mv_expr); |
1072 | 0 | } |
1073 | 0 | sc_params.materialized_params_map.insert( |
1074 | 0 | std::make_pair(to_lower(item.column_name), mv_param)); |
1075 | 0 | } |
1076 | 0 | } |
1077 | 0 | { |
1078 | 0 | std::lock_guard<std::shared_mutex> wrlock(_mutex); |
1079 | 0 | _tablet_ids_in_converting.insert(_new_tablet->tablet_id()); |
1080 | 0 | } |
1081 | 0 | int64_t real_alter_version = 0; |
1082 | 0 | sc_params.enable_unique_key_merge_on_write = |
1083 | 0 | _new_tablet->enable_unique_key_merge_on_write(); |
1084 | 0 | res = _convert_historical_rowsets(sc_params, &real_alter_version); |
1085 | 0 | { |
1086 | 0 | std::lock_guard<std::shared_mutex> wrlock(_mutex); |
1087 | 0 | _tablet_ids_in_converting.erase(_new_tablet->tablet_id()); |
1088 | 0 | } |
1089 | 0 | if (!res) { |
1090 | 0 | break; |
1091 | 0 | } |
1092 | | |
1093 | 0 | DCHECK_GE(real_alter_version, request.alter_version); |
1094 | |
1095 | 0 | if (_new_tablet->keys_type() == UNIQUE_KEYS && |
1096 | 0 | _new_tablet->enable_unique_key_merge_on_write()) { |
1097 | 0 | res = _calc_delete_bitmap_for_mow_table(real_alter_version); |
1098 | 0 | if (!res) { |
1099 | 0 | break; |
1100 | 0 | } |
1101 | 0 | } else { |
1102 | | // set state to ready |
1103 | 0 | std::lock_guard<std::shared_mutex> new_wlock(_new_tablet->get_header_lock()); |
1104 | 0 | SCOPED_SIMPLE_TRACE_IF_TIMEOUT(TRACE_TABLET_LOCK_THRESHOLD); |
1105 | 0 | res = _new_tablet->set_tablet_state(TabletState::TABLET_RUNNING); |
1106 | 0 | if (!res) { |
1107 | 0 | break; |
1108 | 0 | } |
1109 | 0 | _new_tablet->save_meta(); |
1110 | 0 | } |
1111 | 0 | } while (false); |
1112 | | |
1113 | 0 | if (res) { |
1114 | | // _validate_alter_result should be outside the above while loop |
1115 | | // to avoid acquiring the header lock twice. |
1116 | 0 | res = _validate_alter_result(request); |
1117 | 0 | } |
1118 | | |
1119 | | // if failed convert history data, then just remove the new tablet |
1120 | 0 | if (!res) { |
1121 | 0 | LOG(WARNING) << "failed to alter tablet. base_tablet=" << _base_tablet->tablet_id() |
1122 | 0 | << ", drop new_tablet=" << _new_tablet->tablet_id(); |
1123 | | // do not drop the new tablet and its data; the GC thread will clean them up |
1124 | 0 | } |
1125 | |
1126 | 0 | return res; |
1127 | 0 | } |
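One step above decides, per rowset of the new tablet, whether its version range is fully covered by the base tablet's max version (delete it) or straddles that version (fail with VERSION_ALREADY_MERGED). A standalone sketch of that classification with illustrative types:

#include <cstdint>
#include <optional>
#include <vector>

struct VersionRange {
    std::int64_t first;
    std::int64_t second;  // inclusive end
};

// Versions fully covered by the converted history are deleted from the new tablet;
// a range that straddles max_end would create a version cross, so the job fails
// fast instead of trying to repair it. Returns std::nullopt for the crossing case.
std::optional<std::vector<VersionRange>> pick_rowsets_to_delete(
        const std::vector<VersionRange>& new_tablet_versions, std::int64_t max_end) {
    std::vector<VersionRange> to_delete;
    for (const auto& v : new_tablet_versions) {
        if (v.second <= max_end) {
            to_delete.push_back(v);  // fully covered, safe to remove
        } else if (v.first <= max_end) {
            return std::nullopt;     // crosses the base tablet's max version
        }
    }
    return to_delete;
}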
1128 | | |
1129 | 0 | bool SchemaChangeJob::tablet_in_converting(int64_t tablet_id) { |
1130 | 0 | std::shared_lock rdlock(_mutex); |
1131 | 0 | return _tablet_ids_in_converting.find(tablet_id) != _tablet_ids_in_converting.end(); |
1132 | 0 | } |
1133 | | |
1134 | | Status SchemaChangeJob::_get_versions_to_be_changed(std::vector<Version>* versions_to_be_changed, |
1135 | 0 | RowsetSharedPtr* max_rowset) { |
1136 | 0 | RowsetSharedPtr rowset = _base_tablet->get_rowset_with_max_version(); |
1137 | 0 | if (rowset == nullptr) { |
1138 | 0 | return Status::Error<ALTER_DELTA_DOES_NOT_EXISTS>("Tablet has no version. base_tablet={}", |
1139 | 0 | _base_tablet->tablet_id()); |
1140 | 0 | } |
1141 | 0 | *max_rowset = rowset; |
1142 | |
1143 | 0 | RETURN_IF_ERROR(_base_tablet->capture_consistent_versions_unlocked( |
1144 | 0 | Version(0, rowset->version().second), versions_to_be_changed, false, false)); |
1145 | | |
1146 | 0 | return Status::OK(); |
1147 | 0 | } |
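capture_consistent_versions_unlocked above must produce a gap-free chain of versions from 0 up to the max rowset version; any hole means the history cannot be converted. A much-simplified sketch of that idea over plain version ranges (the real implementation walks a version graph, so this is only the intent, not the algorithm):

#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

struct VersionRange {
    std::int64_t first;
    std::int64_t second;  // inclusive end
};

// Pick a gap-free chain of ranges covering [0, max_version]; a hole means the
// caller reports ALTER_DELTA_DOES_NOT_EXISTS.
std::optional<std::vector<VersionRange>> capture_chain(std::vector<VersionRange> versions,
                                                       std::int64_t max_version) {
    std::sort(versions.begin(), versions.end(),
              [](const VersionRange& l, const VersionRange& r) { return l.first < r.first; });
    std::vector<VersionRange> chain;
    std::int64_t next = 0;
    for (const auto& v : versions) {
        if (next > max_version) {
            break;  // [0, max_version] is already covered
        }
        if (v.first == next) {
            chain.push_back(v);
            next = v.second + 1;
        }
    }
    if (next > max_version) {
        return chain;
    }
    return std::nullopt;  // hole in the version chain
}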
1148 | | |
1149 | | // The `real_alter_version` parameter indicates that versions [0-real_alter_version] are |
1150 | | // converted from the base tablet; it is only used for the mow table now. |
1151 | | Status SchemaChangeJob::_convert_historical_rowsets(const SchemaChangeParams& sc_params, |
1152 | 0 | int64_t* real_alter_version) { |
1153 | 0 | LOG(INFO) << "begin to convert historical rowsets for new_tablet from base_tablet." |
1154 | 0 | << " base_tablet=" << _base_tablet->tablet_id() |
1155 | 0 | << ", new_tablet=" << _new_tablet->tablet_id() << ", job_id=" << _job_id; |
1156 | | |
1157 | | // find end version |
1158 | 0 | int32_t end_version = -1; |
1159 | 0 | for (const auto& ref_rowset_reader : sc_params.ref_rowset_readers) { |
1160 | 0 | if (ref_rowset_reader->version().second > end_version) { |
1161 | 0 | end_version = ref_rowset_reader->version().second; |
1162 | 0 | } |
1163 | 0 | } |
1164 | | |
1165 | | // Add filter information to the changer; the filter column information will be set in parse_request, |
1166 | | // and some data is filtered every time the row block changes |
1167 | 0 | BlockChanger changer(_new_tablet_schema, *sc_params.desc_tbl); |
1168 | |
1169 | 0 | bool sc_sorting = false; |
1170 | 0 | bool sc_directly = false; |
1171 | | |
1172 | | // a.Parse the Alter request and convert it into an internal representation |
1173 | 0 | Status res = parse_request(sc_params, _base_tablet_schema.get(), _new_tablet_schema.get(), |
1174 | 0 | &changer, &sc_sorting, &sc_directly); |
1175 | 0 | LOG(INFO) << "schema change type, sc_sorting: " << sc_sorting |
1176 | 0 | << ", sc_directly: " << sc_directly << ", base_tablet=" << _base_tablet->tablet_id() |
1177 | 0 | << ", new_tablet=" << _new_tablet->tablet_id(); |
1178 | |
1179 | 0 | auto process_alter_exit = [&]() -> Status { |
1180 | 0 | { |
1181 | | // save tablet meta here because rowset meta is not saved during add rowset |
1182 | 0 | std::lock_guard new_wlock(_new_tablet->get_header_lock()); |
1183 | 0 | SCOPED_SIMPLE_TRACE_IF_TIMEOUT(TRACE_TABLET_LOCK_THRESHOLD); |
1184 | 0 | _new_tablet->save_meta(); |
1185 | 0 | } |
1186 | 0 | if (res) { |
1187 | 0 | Version test_version(0, end_version); |
1188 | 0 | res = _new_tablet->check_version_integrity(test_version); |
1189 | 0 | } |
1190 | |
1191 | 0 | LOG(INFO) << "finish converting rowsets for new_tablet from base_tablet. " |
1192 | 0 | << "base_tablet=" << _base_tablet->tablet_id() |
1193 | 0 | << ", new_tablet=" << _new_tablet->tablet_id(); |
1194 | 0 | return res; |
1195 | 0 | }; |
1196 | |
1197 | 0 | if (!res) { |
1198 | 0 | LOG(WARNING) << "failed to parse the request. res=" << res; |
1199 | 0 | return process_alter_exit(); |
1200 | 0 | } |
1201 | | |
1202 | 0 | if (!sc_sorting && !sc_directly && sc_params.alter_tablet_type == AlterTabletType::ROLLUP) { |
1203 | 0 | res = Status::Error<SCHEMA_SCHEMA_INVALID>( |
1204 | 0 | "Don't support to add materialized view by linked schema change"); |
1205 | 0 | return process_alter_exit(); |
1206 | 0 | } |
1207 | | |
1208 | | // b. Generate historical data converter |
1209 | 0 | auto sc_procedure = _get_sc_procedure( |
1210 | 0 | changer, sc_sorting, sc_directly, |
1211 | 0 | _local_storage_engine.memory_limitation_bytes_per_thread_for_schema_change()); |
1212 | |
1213 | 0 | DBUG_EXECUTE_IF("SchemaChangeJob::_convert_historical_rowsets.block", DBUG_BLOCK); |
1214 | | |
1215 | | // c. Convert historical data |
1216 | 0 | bool have_failure_rowset = false; |
1217 | 0 | for (const auto& rs_reader : sc_params.ref_rowset_readers) { |
1218 | | // Set status for monitoring: |
1219 | | // as long as any new tablet is still running, the ref tablet is marked as running. |
1220 | | // NOTE: if the first sub-tablet fails first, processing still continues normally here. |
1221 | | // When the tablet creates a new rowset writer, it may change the rowset type; in that case |
1222 | | // linked schema change will not be used. |
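 | | // The writer context below mirrors the source rowset (version, overlap flag, newest write |
 | | // timestamp) but uses the new tablet schema and marks the write as a schema-change write. |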
1223 | 0 | RowsetWriterContext context; |
1224 | 0 | context.version = rs_reader->version(); |
1225 | 0 | context.rowset_state = VISIBLE; |
1226 | 0 | context.segments_overlap = rs_reader->rowset()->rowset_meta()->segments_overlap(); |
1227 | 0 | context.tablet_schema = _new_tablet_schema; |
1228 | 0 | context.newest_write_timestamp = rs_reader->newest_write_timestamp(); |
1229 | |
1230 | 0 | if (!rs_reader->rowset()->is_local()) { |
1231 | 0 | context.storage_resource = |
1232 | 0 | *DORIS_TRY(rs_reader->rowset()->rowset_meta()->remote_storage_resource()); |
1233 | 0 | } |
1234 | | |
1235 | 0 | context.write_type = DataWriteType::TYPE_SCHEMA_CHANGE; |
1236 | | // TODO: if VerticalSegmentWriter is supported, the cluster-key primary key index also needs to be handled |
1237 | 0 | bool vertical = false; |
1238 | 0 | if (sc_sorting && !_new_tablet->tablet_schema()->cluster_key_uids().empty()) { |
1239 | | // see VBaseSchemaChangeWithSorting::_external_sorting |
1240 | 0 | vertical = true; |
1241 | 0 | } |
1242 | 0 | auto result = _new_tablet->create_rowset_writer(context, vertical); |
1243 | 0 | if (!result.has_value()) { |
1244 | 0 | res = Status::Error<ROWSET_BUILDER_INIT>("create_rowset_writer failed, reason={}", |
1245 | 0 | result.error().to_string()); |
1246 | 0 | return process_alter_exit(); |
1247 | 0 | } |
1248 | 0 | auto rowset_writer = std::move(result).value(); |
1249 | 0 | auto pending_rs_guard = _local_storage_engine.add_pending_rowset(context); |
1250 | |
1251 | 0 | if (res = sc_procedure->process(rs_reader, rowset_writer.get(), _new_tablet, _base_tablet, |
1252 | 0 | _base_tablet_schema, _new_tablet_schema); |
1253 | 0 | !res) { |
1254 | 0 | LOG(WARNING) << "failed to process the version." |
1255 | 0 | << " version=" << rs_reader->version().first << "-" |
1256 | 0 | << rs_reader->version().second << ", " << res.to_string(); |
1257 | 0 | return process_alter_exit(); |
1258 | 0 | } |
1259 | | // Add the new version of the data to the header. |
1260 | | // To prevent deadlock, the old tablet must be locked first, and then the new tablet. |
1261 | 0 | std::lock_guard lock(_new_tablet->get_push_lock()); |
1262 | 0 | RowsetSharedPtr new_rowset; |
1263 | 0 | if (!(res = rowset_writer->build(new_rowset)).ok()) { |
1264 | 0 | LOG(WARNING) << "failed to build rowset, exit alter process"; |
1265 | 0 | return process_alter_exit(); |
1266 | 0 | } |
1267 | 0 | res = _new_tablet->add_rowset(new_rowset); |
1268 | 0 | if (res.is<PUSH_VERSION_ALREADY_EXIST>()) { |
1269 | 0 | LOG(WARNING) << "version already exist, version revert occurred. " |
1270 | 0 | << "tablet=" << _new_tablet->tablet_id() << ", version=" |
1271 | 0 | << rs_reader->version().first << "-" << rs_reader->version().second; |
1272 | 0 | _local_storage_engine.add_unused_rowset(new_rowset); |
1273 | 0 | have_failure_rowset = true; |
1274 | 0 | res = Status::OK(); |
1275 | 0 | } else if (!res) { |
1276 | 0 | LOG(WARNING) << "failed to register new version. " |
1277 | 0 | << " tablet=" << _new_tablet->tablet_id() |
1278 | 0 | << ", version=" << rs_reader->version().first << "-" |
1279 | 0 | << rs_reader->version().second; |
1280 | 0 | _local_storage_engine.add_unused_rowset(new_rowset); |
1281 | 0 | return process_alter_exit(); |
1282 | 0 | } else { |
1283 | 0 | VLOG_NOTICE << "register new version. tablet=" << _new_tablet->tablet_id() |
1284 | 0 | << ", version=" << rs_reader->version().first << "-" |
1285 | 0 | << rs_reader->version().second; |
1286 | 0 | } |
1287 | 0 | if (!have_failure_rowset) { |
1288 | 0 | *real_alter_version = rs_reader->version().second; |
1289 | 0 | } |
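 | | // Note: once a version conflict is observed, real_alter_version stops advancing, so the |
 | | // conflicting and later versions are still covered by the delete-bitmap calculation done |
 | | // in _calc_delete_bitmap_for_mow_table() for merge-on-write tables. |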
1290 | |
1291 | 0 | VLOG_TRACE << "succeed to convert a history version." |
1292 | 0 | << " version=" << rs_reader->version().first << "-" |
1293 | 0 | << rs_reader->version().second; |
1294 | 0 | } |
1295 | | |
1296 | | // XXX: The schema-change state should not be canceled at this time, because the new delta still has to be converted to both the old and the new schema versions |
1297 | 0 | return process_alter_exit(); |
1298 | 0 | } |
1299 | | |
1300 | | static const std::string WHERE_SIGN_LOWER = to_lower("__DORIS_WHERE_SIGN__"); |
1301 | | |
1302 | | // @static |
1303 | | // Analyze the column mapping and the filter-key mapping |
1304 | | Status SchemaChangeJob::parse_request(const SchemaChangeParams& sc_params, |
1305 | | TabletSchema* base_tablet_schema, |
1306 | | TabletSchema* new_tablet_schema, BlockChanger* changer, |
1307 | 0 | bool* sc_sorting, bool* sc_directly) { |
1308 | 0 | changer->set_type(sc_params.alter_tablet_type); |
1309 | 0 | changer->set_compatible_version(sc_params.be_exec_version); |
1310 | |
1311 | 0 | const std::unordered_map<std::string, AlterMaterializedViewParam>& materialized_function_map = |
1312 | 0 | sc_params.materialized_params_map; |
1313 | 0 | DescriptorTbl desc_tbl = *sc_params.desc_tbl; |
1314 | | |
1315 | | // set column mapping |
1316 | 0 | for (int i = 0, new_schema_size = new_tablet_schema->num_columns(); i < new_schema_size; ++i) { |
1317 | 0 | const TabletColumn& new_column = new_tablet_schema->column(i); |
1318 | 0 | const std::string& column_name_lower = to_lower(new_column.name()); |
1319 | 0 | ColumnMapping* column_mapping = changer->get_mutable_column_mapping(i); |
1320 | 0 | column_mapping->new_column = &new_column; |
1321 | |
1322 | 0 | column_mapping->ref_column_idx = base_tablet_schema->field_index(new_column.name()); |
1323 | |
1324 | 0 | if (materialized_function_map.find(column_name_lower) != materialized_function_map.end()) { |
1325 | 0 | auto mv_param = materialized_function_map.find(column_name_lower)->second; |
1326 | 0 | column_mapping->expr = mv_param.expr; |
1327 | 0 | if (column_mapping->expr != nullptr) { |
1328 | 0 | continue; |
1329 | 0 | } |
1330 | 0 | } |
1331 | | |
1332 | 0 | if (column_mapping->ref_column_idx >= 0) { |
1333 | 0 | continue; |
1334 | 0 | } |
1335 | | |
1336 | 0 | if (sc_params.alter_tablet_type == ROLLUP) { |
1337 | 0 | std::string materialized_function_map_str; |
1338 | 0 | for (auto str : materialized_function_map) { |
1339 | 0 | if (!materialized_function_map_str.empty()) { |
1340 | 0 | materialized_function_map_str += ','; |
1341 | 0 | } |
1342 | 0 | materialized_function_map_str += str.first; |
1343 | 0 | } |
1344 | 0 | return Status::InternalError( |
1345 | 0 | "referenced column was missing. [column={},materialized_function_map={}]", |
1346 | 0 | new_column.name(), materialized_function_map_str); |
1347 | 0 | } |
1348 | | |
1349 | 0 | if (new_column.name().find("__doris_shadow_") == 0) { |
1350 | | // Should be deleted in the future; kept only as a protection against bugs. |
1351 | 0 | LOG(INFO) << "a shadow column is encountered " << new_column.name(); |
1352 | 0 | return Status::InternalError("failed due to operate on shadow column"); |
1353 | 0 | } |
1354 | | // Newly added columns go here |
1355 | 0 | column_mapping->ref_column_idx = -1; |
1356 | |
1357 | 0 | if (i < base_tablet_schema->num_short_key_columns()) { |
1358 | 0 | *sc_directly = true; |
1359 | 0 | } |
1360 | 0 | RETURN_IF_ERROR( |
1361 | 0 | _init_column_mapping(column_mapping, new_column, new_column.default_value())); |
1362 | | |
1363 | 0 | LOG(INFO) << "A column with default value will be added after schema changing. " |
1364 | 0 | << "column=" << new_column.name() |
1365 | 0 | << ", default_value=" << new_column.default_value(); |
1366 | 0 | } |
1367 | | |
1368 | 0 | if (materialized_function_map.contains(WHERE_SIGN_LOWER)) { |
1369 | 0 | changer->set_where_expr(materialized_function_map.find(WHERE_SIGN_LOWER)->second.expr); |
1370 | 0 | } |
1371 | | |
1372 | | // If the reference sequence of the key columns is out of order, the data needs to be reordered |
1373 | 0 | int num_default_value = 0; |
1374 | |
1375 | 0 | for (int i = 0, new_schema_size = new_tablet_schema->num_key_columns(); i < new_schema_size; |
1376 | 0 | ++i) { |
1377 | 0 | ColumnMapping* column_mapping = changer->get_mutable_column_mapping(i); |
1378 | |
1379 | 0 | if (!column_mapping->has_reference()) { |
1380 | 0 | num_default_value++; |
1381 | 0 | continue; |
1382 | 0 | } |
1383 | | |
1384 | 0 | if (column_mapping->ref_column_idx != i - num_default_value) { |
1385 | 0 | *sc_sorting = true; |
1386 | 0 | return Status::OK(); |
1387 | 0 | } |
1388 | 0 | } |
1389 | | |
1390 | 0 | if (base_tablet_schema->keys_type() != new_tablet_schema->keys_type()) { |
1391 | | // Only when the base table uses duplicate keys and the MV uses aggregate keys |
1392 | | // does the rollup job have to re-aggregate. |
1393 | 0 | *sc_sorting = true; |
1394 | 0 | return Status::OK(); |
1395 | 0 | } |
1396 | | |
1397 | | // If the key order has not changed but the new schema has fewer key columns than the base's, |
1398 | | // the new table must be re-aggregated, |
1399 | | // so we also need to set sc_sorting = true. |
1400 | | // A, B, C are keys (sort keys), D is a value. |
1401 | | // The following needs re-sorting: |
1402 | | // old keys: A B C D |
1403 | | // new keys: A B |
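 | | // Example: rows (A=1, B=1, C=1, D=10) and (A=1, B=1, C=2, D=20) are distinct under the old |
 | | // key (A, B, C) but share the new key (A, B), so they must be sorted and merged into a |
 | | // single aggregated row, which only the sorting schema change can do. |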
1404 | 0 | if (new_tablet_schema->keys_type() != KeysType::DUP_KEYS && |
1405 | 0 | new_tablet_schema->num_key_columns() < base_tablet_schema->num_key_columns()) { |
1406 | | // This is a table with an aggregate key type, and the new schema has fewer key |
1407 | | // columns, which means the data in the new tablet should be more aggregated, |
1408 | | // so we use a sorting schema change to sort and merge the data. |
1409 | 0 | *sc_sorting = true; |
1410 | 0 | return Status::OK(); |
1411 | 0 | } |
1412 | | |
1413 | 0 | if (sc_params.alter_tablet_type == ROLLUP) { |
1414 | 0 | *sc_directly = true; |
1415 | 0 | return Status::OK(); |
1416 | 0 | } |
1417 | | |
1418 | 0 | if (sc_params.enable_unique_key_merge_on_write && |
1419 | 0 | new_tablet_schema->num_key_columns() > base_tablet_schema->num_key_columns()) { |
1420 | 0 | *sc_directly = true; |
1421 | 0 | return Status::OK(); |
1422 | 0 | } |
1423 | | |
1424 | 0 | if (base_tablet_schema->num_short_key_columns() != new_tablet_schema->num_short_key_columns()) { |
1425 | | // The number of short keys changed; linked schema change cannot be used. |
1426 | 0 | *sc_directly = true; |
1427 | 0 | return Status::OK(); |
1428 | 0 | } |
1429 | | |
1430 | 0 | if (!sc_params.delete_handler->empty()) { |
1431 | | // A delete condition exists in the header; linked schema change cannot be used. |
1432 | 0 | *sc_directly = true; |
1433 | 0 | return Status::OK(); |
1434 | 0 | } |
1435 | | |
1436 | | // If the new tablet enables row store, or the new tablet has different row-store columns |
1437 | 0 | if ((!base_tablet_schema->exist_column(BeConsts::ROW_STORE_COL) && |
1438 | 0 | new_tablet_schema->exist_column(BeConsts::ROW_STORE_COL)) || |
1439 | 0 | !std::equal(new_tablet_schema->row_columns_uids().begin(), |
1440 | 0 | new_tablet_schema->row_columns_uids().end(), |
1441 | 0 | base_tablet_schema->row_columns_uids().begin(), |
1442 | 0 | base_tablet_schema->row_columns_uids().end())) { |
1443 | 0 | *sc_directly = true; |
1444 | 0 | } |
1445 | |
1446 | 0 | for (size_t i = 0; i < new_tablet_schema->num_columns(); ++i) { |
1447 | 0 | ColumnMapping* column_mapping = changer->get_mutable_column_mapping(i); |
1448 | 0 | if (column_mapping->expr != nullptr) { |
1449 | 0 | *sc_directly = true; |
1450 | 0 | return Status::OK(); |
1451 | 0 | } else if (column_mapping->ref_column_idx >= 0) { |
1452 | | // index changed |
1453 | 0 | if (vectorized::schema_util::has_schema_index_diff( |
1454 | 0 | new_tablet_schema, base_tablet_schema, i, column_mapping->ref_column_idx)) { |
1455 | 0 | *sc_directly = true; |
1456 | 0 | return Status::OK(); |
1457 | 0 | } |
1458 | 0 | } |
1459 | 0 | } |
1460 | | |
1461 | | // If the rs_reader has remote files, linked schema change is not supported; |
1462 | | // use direct schema change instead. |
1463 | 0 | if (!(*sc_directly) && !(*sc_sorting)) { |
1464 | | // check whether any referenced rowset is remote |
1465 | | // (applies to cloud and cold storage) |
1466 | 0 | for (const auto& rs_reader : sc_params.ref_rowset_readers) { |
1467 | 0 | if (!rs_reader->rowset()->is_local()) { |
1468 | 0 | *sc_directly = true; |
1469 | 0 | break; |
1470 | 0 | } |
1471 | 0 | } |
1472 | 0 | } |
1473 | |
1474 | 0 | return Status::OK(); |
1475 | 0 | } |
1476 | | |
1477 | | Status SchemaChangeJob::_init_column_mapping(ColumnMapping* column_mapping, |
1478 | | const TabletColumn& column_schema, |
1479 | 0 | const std::string& value) { |
1480 | 0 | if (auto field = WrapperField::create(column_schema); field.has_value()) { |
1481 | 0 | column_mapping->default_value = field.value(); |
1482 | 0 | } else { |
1483 | 0 | return field.error(); |
1484 | 0 | } |
1485 | | |
1486 | 0 | if (column_schema.is_nullable() && value.length() == 0) { |
1487 | 0 | column_mapping->default_value->set_null(); |
1488 | 0 | } else { |
1489 | 0 | RETURN_IF_ERROR(column_mapping->default_value->from_string(value, column_schema.precision(), |
1490 | 0 | column_schema.frac())); |
1491 | 0 | } |
1492 | | |
1493 | 0 | return Status::OK(); |
1494 | 0 | } |
1495 | | |
1496 | 0 | Status SchemaChangeJob::_validate_alter_result(const TAlterTabletReqV2& request) { |
1497 | 0 | Version max_continuous_version = {-1, 0}; |
1498 | 0 | _new_tablet->max_continuous_version_from_beginning(&max_continuous_version); |
1499 | 0 | LOG(INFO) << "find max continuous version of tablet=" << _new_tablet->tablet_id() |
1500 | 0 | << ", start_version=" << max_continuous_version.first |
1501 | 0 | << ", end_version=" << max_continuous_version.second; |
1502 | 0 | if (max_continuous_version.second < request.alter_version) { |
1503 | 0 | return Status::InternalError("result version={} is less than request version={}", |
1504 | 0 | max_continuous_version.second, request.alter_version); |
1505 | 0 | } |
1506 | | |
1507 | 0 | std::vector<std::pair<Version, RowsetSharedPtr>> version_rowsets; |
1508 | 0 | { |
1509 | 0 | std::shared_lock rdlock(_new_tablet->get_header_lock()); |
1510 | 0 | _new_tablet->acquire_version_and_rowsets(&version_rowsets); |
1511 | 0 | } |
1512 | 0 | for (auto& pair : version_rowsets) { |
1513 | 0 | RowsetSharedPtr rowset = pair.second; |
1514 | 0 | if (!rowset->check_file_exist()) { |
1515 | 0 | return Status::Error<NOT_FOUND>( |
1516 | 0 | "SchemaChangeJob::_validate_alter_result meet invalid rowset"); |
1517 | 0 | } |
1518 | 0 | } |
1519 | 0 | return Status::OK(); |
1520 | 0 | } |
1521 | | |
1522 | | // For unique tables with merge-on-write, the delete bitmap must be processed here. |
1523 | | // 1. During double write, the newly imported rowsets do not calculate the |
1524 | | //    delete bitmap and publish successfully. |
1525 | | // 2. After conversion, calculate the delete bitmap for the rowsets imported |
1526 | | //    during double write. During this period, new data can still be imported |
1527 | | //    without calculating the delete bitmap and publish successfully. |
1528 | | // 3. Block new publishes and calculate the delete bitmap of the |
1529 | | //    incremental rowsets. |
1530 | | // 4. Switch the tablet status to TABLET_RUNNING. The newly imported |
1531 | | //    data will calculate the delete bitmap. |
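 | | // Note: steps 2 and 3 correspond to the two capture/update loops below; step 3 runs while |
 | | // holding the rowset update lock and the header write lock so that new publishes stay |
 | | // blocked until the tablet state is switched back to TABLET_RUNNING in step 4. |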
1532 | 0 | Status SchemaChangeJob::_calc_delete_bitmap_for_mow_table(int64_t alter_version) { |
1533 | 0 | DBUG_EXECUTE_IF("SchemaChangeJob._calc_delete_bitmap_for_mow_table.random_failed", { |
1534 | 0 | if (rand() % 100 < (100 * dp->param("percent", 0.1))) { |
1535 | 0 | LOG_WARNING("SchemaChangeJob._calc_delete_bitmap_for_mow_table.random_failed"); |
1536 | 0 | return Status::InternalError("debug schema change calc delete bitmap random failed"); |
1537 | 0 | } |
1538 | 0 | }); |
1539 | | |
1540 | | // Compaction cannot run while the delete bitmap is being calculated; if the rowset being |
1541 | | // calculated were compacted, its delete bitmap could be missed. |
1542 | 0 | std::lock_guard base_compaction_lock(_new_tablet->get_base_compaction_lock()); |
1543 | 0 | std::lock_guard cumu_compaction_lock(_new_tablet->get_cumulative_compaction_lock()); |
1544 | | |
1545 | | // step 2 |
1546 | 0 | int64_t max_version = _new_tablet->max_version().second; |
1547 | 0 | std::vector<RowsetSharedPtr> rowsets; |
1548 | 0 | if (alter_version < max_version) { |
1549 | 0 | LOG(INFO) << "alter table for unique with merge-on-write, calculate delete bitmap of " |
1550 | 0 | << "double write rowsets for version: " << alter_version + 1 << "-" << max_version |
1551 | 0 | << " new_tablet=" << _new_tablet->tablet_id(); |
1552 | 0 | std::shared_lock rlock(_new_tablet->get_header_lock()); |
1553 | 0 | RETURN_IF_ERROR(_new_tablet->capture_consistent_rowsets_unlocked( |
1554 | 0 | {alter_version + 1, max_version}, &rowsets)); |
1555 | 0 | } |
1556 | 0 | for (auto rowset_ptr : rowsets) { |
1557 | 0 | std::lock_guard rwlock(_new_tablet->get_rowset_update_lock()); |
1558 | 0 | std::shared_lock rlock(_new_tablet->get_header_lock()); |
1559 | 0 | RETURN_IF_ERROR(Tablet::update_delete_bitmap_without_lock(_new_tablet, rowset_ptr)); |
1560 | 0 | } |
1561 | | |
1562 | | // step 3 |
1563 | 0 | std::lock_guard rwlock(_new_tablet->get_rowset_update_lock()); |
1564 | 0 | std::lock_guard new_wlock(_new_tablet->get_header_lock()); |
1565 | 0 | SCOPED_SIMPLE_TRACE_IF_TIMEOUT(TRACE_TABLET_LOCK_THRESHOLD); |
1566 | 0 | int64_t new_max_version = _new_tablet->max_version_unlocked(); |
1567 | 0 | rowsets.clear(); |
1568 | 0 | if (max_version < new_max_version) { |
1569 | 0 | LOG(INFO) << "alter table for unique with merge-on-write, calculate delete bitmap of " |
1570 | 0 | << "incremental rowsets for version: " << max_version + 1 << "-" |
1571 | 0 | << new_max_version << " new_tablet=" << _new_tablet->tablet_id(); |
1572 | 0 | RETURN_IF_ERROR(_new_tablet->capture_consistent_rowsets_unlocked( |
1573 | 0 | {max_version + 1, new_max_version}, &rowsets)); |
1574 | 0 | } |
1575 | 0 | for (auto&& rowset_ptr : rowsets) { |
1576 | 0 | RETURN_IF_ERROR(Tablet::update_delete_bitmap_without_lock(_new_tablet, rowset_ptr)); |
1577 | 0 | } |
1578 | | // step 4 |
1579 | 0 | RETURN_IF_ERROR(_new_tablet->set_tablet_state(TabletState::TABLET_RUNNING)); |
1580 | 0 | _new_tablet->save_meta(); |
1581 | 0 | return Status::OK(); |
1582 | 0 | } |
1583 | | |
1584 | | } // namespace doris |