be/src/exec/rowid_fetcher.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include "exec/rowid_fetcher.h" |
19 | | |
20 | | #include <brpc/callback.h> |
21 | | #include <butil/endpoint.h> |
22 | | #include <fmt/format.h> |
23 | | #include <gen_cpp/data.pb.h> |
24 | | #include <gen_cpp/internal_service.pb.h> |
25 | | #include <gen_cpp/olap_file.pb.h> |
26 | | #include <gen_cpp/types.pb.h> |
27 | | #include <glog/logging.h> |
28 | | #include <stddef.h> |
29 | | #include <stdint.h> |
30 | | |
31 | | #include <algorithm> |
32 | | #include <cstdint> |
33 | | #include <memory> |
34 | | #include <ostream> |
35 | | #include <string> |
36 | | #include <unordered_map> |
37 | | #include <utility> |
38 | | #include <vector> |
39 | | |
40 | | #include "bthread/countdown_event.h" |
41 | | #include "common/config.h" |
42 | | #include "common/consts.h" |
43 | | #include "common/exception.h" |
44 | | #include "common/signal_handler.h" |
45 | | #include "core/assert_cast.h" |
46 | | #include "core/block/block.h" // Block |
47 | | #include "core/column/column.h" |
48 | | #include "core/column/column_nullable.h" |
49 | | #include "core/column/column_string.h" |
50 | | #include "core/data_type/data_type_struct.h" |
51 | | #include "core/data_type_serde/data_type_serde.h" |
52 | | #include "core/string_ref.h" |
53 | | #include "exec/scan/file_scanner.h" |
54 | | #include "format/orc/vorc_reader.h" |
55 | | #include "format/parquet/vparquet_reader.h" |
56 | | #include "runtime/descriptors.h" |
57 | | #include "runtime/exec_env.h" // ExecEnv |
58 | | #include "runtime/fragment_mgr.h" // FragmentMgr |
59 | | #include "runtime/runtime_state.h" // RuntimeState |
60 | | #include "runtime/workload_group/workload_group_manager.h" |
61 | | #include "semaphore" |
62 | | #include "storage/olap_common.h" |
63 | | #include "storage/rowset/beta_rowset.h" |
64 | | #include "storage/segment/column_reader.h" |
65 | | #include "storage/storage_engine.h" |
66 | | #include "storage/tablet/tablet_fwd.h" |
67 | | #include "storage/tablet/tablet_schema.h" |
68 | | #include "storage/tablet_info.h" // DorisNodesInfo |
69 | | #include "storage/utils.h" |
70 | | #include "util/brpc_client_cache.h" // BrpcClientCache |
71 | | #include "util/defer_op.h" |
72 | | #include "util/jsonb/serialize.h" |
73 | | |
74 | | namespace doris { |
75 | | |
76 | | #include "common/compile_check_begin.h" |
77 | | |
78 | 8 | Status RowIDFetcher::init() { |
79 | 8 | DorisNodesInfo nodes_info; |
80 | 8 | nodes_info.setNodes(_fetch_option.t_fetch_opt.nodes_info); |
81 | 8 | for (auto [node_id, node_info] : nodes_info.nodes_info()) { |
82 | 8 | auto client = ExecEnv::GetInstance()->brpc_internal_client_cache()->get_client( |
83 | 8 | node_info.host, node_info.brpc_port); |
84 | 8 | if (!client) { |
85 | 0 | LOG(WARNING) << "Get rpc stub failed, host=" << node_info.host |
86 | 0 | << ", port=" << node_info.brpc_port; |
87 | 0 | return Status::InternalError("RowIDFetcher failed to init rpc client, host={}, port={}", |
88 | 0 | node_info.host, node_info.brpc_port); |
89 | 0 | } |
90 | 8 | _stubs.push_back(client); |
91 | 8 | } |
92 | 8 | return Status::OK(); |
93 | 8 | } |
94 | | |
95 | 8 | PMultiGetRequest RowIDFetcher::_init_fetch_request(const ColumnString& row_locs) const { |
96 | 8 | PMultiGetRequest mget_req; |
97 | 8 | _fetch_option.desc->to_protobuf(mget_req.mutable_desc()); |
98 | 32 | for (SlotDescriptor* slot : _fetch_option.desc->slots()) { |
99 | | // ignore rowid |
100 | 32 | if (slot->col_name() == BeConsts::ROWID_COL) { |
101 | 8 | continue; |
102 | 8 | } |
103 | 24 | slot->to_protobuf(mget_req.add_slots()); |
104 | 24 | } |
105 | 33 | for (size_t i = 0; i < row_locs.size(); ++i) { |
106 | 25 | PRowLocation row_loc; |
107 | 25 | StringRef row_id_rep = row_locs.get_data_at(i); |
108 | | // TODO: When transferring data between machines with different byte orders (endianness), |
109 | | // not performing proper handling may lead to issues in parsing and exchanging the data. |
110 | 25 | auto location = reinterpret_cast<const GlobalRowLoacation*>(row_id_rep.data); |
111 | 25 | row_loc.set_tablet_id(location->tablet_id); |
112 | 25 | row_loc.set_rowset_id(location->row_location.rowset_id.to_string()); |
113 | 25 | row_loc.set_segment_id(location->row_location.segment_id); |
114 | 25 | row_loc.set_ordinal_id(location->row_location.row_id); |
115 | 25 | *mget_req.add_row_locs() = std::move(row_loc); |
116 | 25 | } |
117 | | // Set column desc |
118 | 30 | for (const TColumn& tcolumn : _fetch_option.t_fetch_opt.column_desc) { |
119 | 30 | TabletColumn column(tcolumn); |
120 | 30 | column.to_schema_pb(mget_req.add_column_desc()); |
121 | 30 | } |
122 | 8 | PUniqueId& query_id = *mget_req.mutable_query_id(); |
123 | 8 | query_id.set_hi(_fetch_option.runtime_state->query_id().hi); |
124 | 8 | query_id.set_lo(_fetch_option.runtime_state->query_id().lo); |
125 | 8 | mget_req.set_be_exec_version(_fetch_option.runtime_state->be_exec_version()); |
126 | 8 | mget_req.set_fetch_row_store(_fetch_option.t_fetch_opt.fetch_row_store); |
127 | 8 | return mget_req; |
128 | 8 | } |
129 | | |
// Merge the per-BE multiget responses into a single output block and collect
// the row locations each BE actually returned (in response/arrival order; the
// caller re-sorts to match the requested row-id order).
// Returns InternalError if any RPC controller failed or any response carries a
// non-OK status.
Status RowIDFetcher::_merge_rpc_results(const PMultiGetRequest& request,
                                        const std::vector<PMultiGetResponse>& rsps,
                                        const std::vector<brpc::Controller>& cntls,
                                        Block* output_block,
                                        std::vector<PRowLocation>* rows_id) const {
    output_block->clear();
    // Surface transport-level failures before touching any payload.
    for (const auto& cntl : cntls) {
        if (cntl.Failed()) {
            LOG(WARNING) << "Failed to fetch meet rpc error:" << cntl.ErrorText()
                         << ", host:" << cntl.remote_side();
            return Status::InternalError(cntl.ErrorText());
        }
    }
    // Lazily-built state for the row-store (binary) decode path; shared across
    // all responses so serdes/uid mapping are constructed only once.
    DataTypeSerDeSPtrs serdes;
    std::unordered_map<uint32_t, uint32_t> col_uid_to_idx;
    std::vector<std::string> default_values;
    default_values.resize(_fetch_option.desc->slots().size());
    auto merge_function = [&](const PMultiGetResponse& resp) {
        Status st(Status::create(resp.status()));
        if (!st.ok()) {
            LOG(WARNING) << "Failed to fetch " << st.to_string();
            return st;
        }
        // Record which rows this BE served, in the order it served them.
        for (const PRowLocation& row_id : resp.row_locs()) {
            rows_id->push_back(row_id);
        }
        // Merge binary rows (row-store fetch: each row arrives as a JSONB blob).
        if (request.fetch_row_store()) {
            CHECK(resp.row_locs().size() == resp.binary_row_data_size());
            if (output_block->is_empty_column()) {
                *output_block = Block(_fetch_option.desc->slots(), 1);
            }
            if (serdes.empty() && col_uid_to_idx.empty()) {
                serdes = create_data_type_serdes(_fetch_option.desc->slots());
                for (int i = 0; i < _fetch_option.desc->slots().size(); ++i) {
                    col_uid_to_idx[_fetch_option.desc->slots()[i]->col_unique_id()] = i;
                    default_values[i] = _fetch_option.desc->slots()[i]->col_default_value();
                }
            }
            for (int i = 0; i < resp.binary_row_data_size(); ++i) {
                RETURN_IF_ERROR(JsonbSerializeUtil::jsonb_to_block(
                        serdes, resp.binary_row_data(i).data(), resp.binary_row_data(i).size(),
                        col_uid_to_idx, *output_block, default_values, {}));
            }
            return Status::OK();
        }
        // Merge partial blocks (column-store fetch: each BE ships a serialized Block).
        Block partial_block;
        [[maybe_unused]] size_t uncompressed_size = 0;
        [[maybe_unused]] int64_t uncompressed_time = 0;

        RETURN_IF_ERROR(
                partial_block.deserialize(resp.block(), &uncompressed_size, &uncompressed_time));
        if (partial_block.is_empty_column()) {
            return Status::OK();
        }
        CHECK(resp.row_locs().size() == partial_block.rows());
        if (output_block->is_empty_column()) {
            // First non-empty response: adopt its block wholesale (cheap swap).
            output_block->swap(partial_block);
        } else if (partial_block.columns() != output_block->columns()) {
            return Status::Error<ErrorCode::INTERNAL_ERROR>(
                    "Merge block not match, self:[{}], input:[{}], ", output_block->dump_types(),
                    partial_block.dump_types());
        } else {
            // Append this BE's rows column-by-column onto the accumulated block.
            for (int i = 0; i < output_block->columns(); ++i) {
                output_block->get_by_position(i).column->assume_mutable()->insert_range_from(
                        *partial_block.get_by_position(i)
                                 .column->convert_to_full_column_if_const()
                                 .get(),
                        0, partial_block.rows());
            }
        }
        return Status::OK();
    };

    for (const auto& resp : rsps) {
        RETURN_IF_ERROR(merge_function(resp));
    }
    return Status::OK();
}
210 | | |
211 | 19.3k | bool _has_char_type(const DataTypePtr& type) { |
212 | 19.3k | switch (type->get_primitive_type()) { |
213 | 541 | case TYPE_CHAR: { |
214 | 541 | return true; |
215 | 0 | } |
216 | 1.49k | case TYPE_ARRAY: { |
217 | 1.49k | const auto* arr_type = assert_cast<const DataTypeArray*>(remove_nullable(type).get()); |
218 | 1.49k | return _has_char_type(arr_type->get_nested_type()); |
219 | 0 | } |
220 | 370 | case TYPE_MAP: { |
221 | 370 | const auto* map_type = assert_cast<const DataTypeMap*>(remove_nullable(type).get()); |
222 | 370 | return _has_char_type(map_type->get_key_type()) || |
223 | 370 | _has_char_type(map_type->get_value_type()); |
224 | 0 | } |
225 | 271 | case TYPE_STRUCT: { |
226 | 271 | const auto* struct_type = assert_cast<const DataTypeStruct*>(remove_nullable(type).get()); |
227 | 271 | return std::any_of(struct_type->get_elements().begin(), struct_type->get_elements().end(), |
228 | 542 | [&](const DataTypePtr& dt) -> bool { return _has_char_type(dt); }); |
229 | 0 | } |
230 | 16.7k | default: |
231 | 16.7k | return false; |
232 | 19.3k | } |
233 | 19.3k | } |
234 | | |
235 | 8 | Status RowIDFetcher::fetch(const ColumnPtr& column_row_ids, Block* res_block) { |
236 | 8 | CHECK(!_stubs.empty()); |
237 | 8 | PMultiGetRequest mget_req = _init_fetch_request( |
238 | 8 | assert_cast<const ColumnString&>(*remove_nullable(column_row_ids).get())); |
239 | 8 | std::vector<PMultiGetResponse> resps(_stubs.size()); |
240 | 8 | std::vector<brpc::Controller> cntls(_stubs.size()); |
241 | 8 | bthread::CountdownEvent counter(cast_set<int>(_stubs.size())); |
242 | 16 | for (size_t i = 0; i < _stubs.size(); ++i) { |
243 | 8 | cntls[i].set_timeout_ms(_fetch_option.runtime_state->execution_timeout() * 1000); |
244 | 8 | auto callback = brpc::NewCallback(fetch_callback, &counter); |
245 | 8 | _stubs[i]->multiget_data(&cntls[i], &mget_req, &resps[i], callback); |
246 | 8 | } |
247 | 8 | counter.wait(); |
248 | | |
249 | | // Merge |
250 | 8 | std::vector<PRowLocation> rows_locs; |
251 | 8 | rows_locs.reserve(rows_locs.size()); |
252 | 8 | RETURN_IF_ERROR(_merge_rpc_results(mget_req, resps, cntls, res_block, &rows_locs)); |
253 | 8 | if (rows_locs.size() < column_row_ids->size()) { |
254 | 0 | return Status::InternalError("Miss matched return row loc count {}, expected {}, input {}", |
255 | 0 | rows_locs.size(), res_block->rows(), column_row_ids->size()); |
256 | 0 | } |
257 | | // Final sort by row_ids sequence, since row_ids is already sorted if need |
258 | 8 | std::map<GlobalRowLoacation, size_t> positions; |
259 | 33 | for (size_t i = 0; i < rows_locs.size(); ++i) { |
260 | 25 | RowsetId rowset_id; |
261 | 25 | rowset_id.init(rows_locs[i].rowset_id()); |
262 | 25 | GlobalRowLoacation grl(rows_locs[i].tablet_id(), rowset_id, |
263 | 25 | cast_set<uint32_t>(rows_locs[i].segment_id()), |
264 | 25 | cast_set<uint32_t>(rows_locs[i].ordinal_id())); |
265 | 25 | positions[grl] = i; |
266 | 25 | }; |
267 | | // TODO remove this warning code |
268 | 8 | if (positions.size() < rows_locs.size()) { |
269 | 0 | LOG(WARNING) << "cwntains duplicated row entry"; |
270 | 0 | } |
271 | 8 | IColumn::Permutation permutation; |
272 | 8 | permutation.reserve(column_row_ids->size()); |
273 | 33 | for (size_t i = 0; i < column_row_ids->size(); ++i) { |
274 | 25 | auto location = |
275 | 25 | reinterpret_cast<const GlobalRowLoacation*>(column_row_ids->get_data_at(i).data); |
276 | 25 | permutation.push_back(positions[*location]); |
277 | 25 | } |
278 | 32 | for (size_t i = 0; i < res_block->columns(); ++i) { |
279 | 24 | res_block->get_by_position(i).column = |
280 | 24 | res_block->get_by_position(i).column->permute(permutation, permutation.size()); |
281 | 24 | } |
282 | | // Check row consistency |
283 | 8 | RETURN_IF_CATCH_EXCEPTION(res_block->check_number_of_rows()); |
284 | | // shrink for char type |
285 | 8 | std::vector<size_t> char_type_idx; |
286 | 40 | for (size_t i = 0; i < _fetch_option.desc->slots().size(); i++) { |
287 | 32 | const auto& column_desc = _fetch_option.desc->slots()[i]; |
288 | 32 | const auto type = column_desc->type(); |
289 | 32 | if (_has_char_type(type)) { |
290 | 0 | char_type_idx.push_back(i); |
291 | 0 | } |
292 | 32 | } |
293 | 8 | res_block->shrink_char_type_column_suffix_zero(char_type_idx); |
294 | 8 | VLOG_DEBUG << "dump block:" << res_block->dump_data(0, 10); |
295 | 8 | return Status::OK(); |
296 | 8 | } |
297 | | |
298 | | struct IteratorKey { |
299 | | int64_t tablet_id; |
300 | | RowsetId rowset_id; |
301 | | uint64_t segment_id; |
302 | | int slot_id; |
303 | | |
304 | | // unordered map std::equal_to |
305 | 22.4k | bool operator==(const IteratorKey& rhs) const { |
306 | 22.4k | return tablet_id == rhs.tablet_id && rowset_id == rhs.rowset_id && |
307 | 22.4k | segment_id == rhs.segment_id && slot_id == rhs.slot_id; |
308 | 22.4k | } |
309 | | }; |
310 | | |
311 | | struct SegKey { |
312 | | int64_t tablet_id; |
313 | | RowsetId rowset_id; |
314 | | uint64_t segment_id; |
315 | | |
316 | | // unordered map std::equal_to |
317 | 7.99k | bool operator==(const SegKey& rhs) const { |
318 | 7.99k | return tablet_id == rhs.tablet_id && rowset_id == rhs.rowset_id && |
319 | 7.99k | segment_id == rhs.segment_id; |
320 | 7.99k | } |
321 | | }; |
322 | | |
323 | | struct HashOfSegKey { |
324 | 13.6k | size_t operator()(const SegKey& key) const { |
325 | 13.6k | size_t seed = 0; |
326 | 13.6k | seed = HashUtil::hash64(&key.tablet_id, sizeof(key.tablet_id), seed); |
327 | 13.6k | seed = HashUtil::hash64(&key.rowset_id.hi, sizeof(key.rowset_id.hi), seed); |
328 | 13.6k | seed = HashUtil::hash64(&key.rowset_id.mi, sizeof(key.rowset_id.mi), seed); |
329 | 13.6k | seed = HashUtil::hash64(&key.rowset_id.lo, sizeof(key.rowset_id.lo), seed); |
330 | 13.6k | seed = HashUtil::hash64(&key.segment_id, sizeof(key.segment_id), seed); |
331 | 13.6k | return seed; |
332 | 13.6k | } |
333 | | }; |
334 | | |
335 | | struct HashOfIteratorKey { |
336 | 34.3k | size_t operator()(const IteratorKey& key) const { |
337 | 34.3k | size_t seed = 0; |
338 | 34.3k | seed = HashUtil::hash64(&key.tablet_id, sizeof(key.tablet_id), seed); |
339 | 34.3k | seed = HashUtil::hash64(&key.rowset_id.hi, sizeof(key.rowset_id.hi), seed); |
340 | 34.3k | seed = HashUtil::hash64(&key.rowset_id.mi, sizeof(key.rowset_id.mi), seed); |
341 | 34.3k | seed = HashUtil::hash64(&key.rowset_id.lo, sizeof(key.rowset_id.lo), seed); |
342 | 34.3k | seed = HashUtil::hash64(&key.segment_id, sizeof(key.segment_id), seed); |
343 | 34.3k | seed = HashUtil::hash64(&key.slot_id, sizeof(key.slot_id), seed); |
344 | 34.3k | return seed; |
345 | 34.3k | } |
346 | | }; |
347 | | |
// Cached per-(tablet, rowset, segment, slot) reader state, reused across rows
// that hit the same column of the same segment.
struct IteratorItem {
    // Lazily created column iterator; reused on subsequent reads of this column.
    std::unique_ptr<ColumnIterator> iterator;
    // Keeps the segment alive while the iterator references it.
    SegmentSharedPtr segment;
    // for holding the reference of storage read options to avoid use after release
    StorageReadOptions storage_read_options;
};
354 | | |
// Cached per-(tablet, rowset, segment) handles, keeping the underlying storage
// objects alive for the duration of a batch read.
struct SegItem {
    BaseTabletSPtr tablet;
    BetaRowsetSharedPtr rowset;
    // for holding the reference of segment to avoid use after release
    SegmentSharedPtr segment;
};
361 | | |
// Serve a PMultiGetRequest on the storage side: for each requested row
// location, locate the tablet/rowset/segment and read the row either from the
// row store (binary JSONB payload) or column-by-column from the column store.
// Rows whose tablet/rowset/segment cannot be found are silently skipped; every
// row actually served is echoed back via response->row_locs (in the Defer).
Status RowIdStorageReader::read_by_rowids(const PMultiGetRequest& request,
                                          PMultiGetResponse* response) {
    // read from storage engine row id by row id
    OlapReaderStatistics stats;
    Block result_block;
    int64_t acquire_tablet_ms = 0;
    int64_t acquire_rowsets_ms = 0;
    int64_t acquire_segments_ms = 0;
    int64_t lookup_row_data_ms = 0;

    // init desc
    std::vector<SlotDescriptor> slots;
    slots.reserve(request.slots().size());
    for (const auto& pslot : request.slots()) {
        slots.push_back(SlotDescriptor(pslot));
    }

    // init read schema
    TabletSchema full_read_schema;
    for (const ColumnPB& column_pb : request.column_desc()) {
        full_read_schema.append_column(TabletColumn(column_pb));
    }

    // Column iterators are cached per (tablet, rowset, segment, slot) so
    // consecutive rows hitting the same column reuse the same iterator.
    std::unordered_map<IteratorKey, IteratorItem, HashOfIteratorKey> iterator_map;
    // read row by row
    for (int i = 0; i < request.row_locs_size(); ++i) {
        const auto& row_loc = request.row_locs(i);
        MonotonicStopWatch watch;
        watch.start();
        BaseTabletSPtr tablet = scope_timer_run(
                [&]() {
                    auto res = ExecEnv::get_tablet(row_loc.tablet_id(), nullptr, true);
                    return !res.has_value() ? nullptr
                                            : std::dynamic_pointer_cast<BaseTablet>(res.value());
                },
                &acquire_tablet_ms);
        RowsetId rowset_id;
        rowset_id.init(row_loc.rowset_id());
        if (!tablet) {
            // Tablet moved or dropped; skip this row (not echoed in row_locs).
            continue;
        }
        // We ensured it's rowset is not released when init Tablet reader param, rowset->update_delayed_expired_timestamp();
        BetaRowsetSharedPtr rowset = std::static_pointer_cast<BetaRowset>(scope_timer_run(
                [&]() {
                    return ExecEnv::GetInstance()->storage_engine().get_quering_rowset(rowset_id);
                },
                &acquire_rowsets_ms));
        if (!rowset) {
            LOG(INFO) << "no such rowset " << rowset_id;
            continue;
        }
        size_t row_size = 0;
        // From here on the row is considered served: the Defer records its
        // location into the response and logs timing (sampled 1-in-100).
        Defer _defer([&]() {
            LOG_EVERY_N(INFO, 100)
                    << "multiget_data single_row, cost(us):" << watch.elapsed_time() / 1000
                    << ", row_size:" << row_size;
            *response->add_row_locs() = row_loc;
        });
        // TODO: supoort session variable enable_page_cache and disable_file_cache if necessary.
        SegmentCacheHandle segment_cache;
        RETURN_IF_ERROR(scope_timer_run(
                [&]() {
                    return SegmentLoader::instance()->load_segments(rowset, &segment_cache, true);
                },
                &acquire_segments_ms));
        // find segment
        auto it = std::find_if(segment_cache.get_segments().cbegin(),
                               segment_cache.get_segments().cend(),
                               [&row_loc](const segment_v2::SegmentSharedPtr& seg) {
                                   return seg->id() == row_loc.segment_id();
                               });
        if (it == segment_cache.get_segments().end()) {
            continue;
        }
        segment_v2::SegmentSharedPtr segment = *it;
        GlobalRowLoacation row_location(row_loc.tablet_id(), rowset->rowset_id(),
                                        cast_set<uint32_t>(row_loc.segment_id()),
                                        cast_set<uint32_t>(row_loc.ordinal_id()));
        // fetch by row store, more effcient way
        if (request.fetch_row_store()) {
            if (!tablet->tablet_schema()->has_row_store_for_all_columns()) {
                return Status::InternalError("Tablet {} does not have row store for all columns",
                                             tablet->tablet_id());
            }
            RowLocation loc(rowset_id, segment->id(), cast_set<uint32_t>(row_loc.ordinal_id()));
            std::string* value = response->add_binary_row_data();
            RETURN_IF_ERROR(scope_timer_run(
                    [&]() { return tablet->lookup_row_data({}, loc, rowset, stats, *value); },
                    &lookup_row_data_ms));
            row_size = value->size();
            continue;
        }

        // fetch by column store
        if (result_block.is_empty_column()) {
            result_block = Block(slots, request.row_locs().size());
        }
        VLOG_DEBUG << "Read row location "
                   << fmt::format("{}, {}, {}, {}", row_location.tablet_id,
                                  row_location.row_location.rowset_id.to_string(),
                                  row_location.row_location.segment_id,
                                  row_location.row_location.row_id);
        // Read one cell per requested slot, reusing cached iterators.
        for (int x = 0; x < slots.size(); ++x) {
            auto row_id = static_cast<segment_v2::rowid_t>(row_loc.ordinal_id());
            MutableColumnPtr column = result_block.get_by_position(x).column->assume_mutable();
            IteratorKey iterator_key {.tablet_id = tablet->tablet_id(),
                                      .rowset_id = rowset_id,
                                      .segment_id = row_loc.segment_id(),
                                      .slot_id = slots[x].id()};
            IteratorItem& iterator_item = iterator_map[iterator_key];
            if (iterator_item.segment == nullptr) {
                // hold the reference
                iterator_map[iterator_key].segment = segment;
                iterator_item.storage_read_options.stats = &stats;
                iterator_item.storage_read_options.io_ctx.reader_type = ReaderType::READER_QUERY;
            }
            segment = iterator_item.segment;
            RETURN_IF_ERROR(segment->seek_and_read_by_rowid(
                    full_read_schema, &slots[x], row_id, column, iterator_item.storage_read_options,
                    iterator_item.iterator));
        }
    }
    // serialize block if not empty
    if (!result_block.is_empty_column()) {
        VLOG_DEBUG << "dump block:" << result_block.dump_data(0, 10)
                   << ", be_exec_version:" << request.be_exec_version();
        [[maybe_unused]] size_t compressed_size = 0;
        [[maybe_unused]] size_t uncompressed_size = 0;
        [[maybe_unused]] int64_t compress_time = 0;
        int be_exec_version = request.has_be_exec_version() ? request.be_exec_version() : 0;
        RETURN_IF_ERROR(result_block.serialize(be_exec_version, response->mutable_block(),
                                               &uncompressed_size, &compressed_size, &compress_time,
                                               segment_v2::CompressionTypePB::LZ4));
    }

    LOG(INFO) << "Query stats: "
              << fmt::format(
                         "query_id:{}, "
                         "hit_cached_pages:{}, total_pages_read:{}, compressed_bytes_read:{}, "
                         "io_latency:{}ns, "
                         "uncompressed_bytes_read:{},"
                         "bytes_read:{},"
                         "acquire_tablet_ms:{}, acquire_rowsets_ms:{}, acquire_segments_ms:{}, "
                         "lookup_row_data_ms:{}",
                         print_id(request.query_id()), stats.cached_pages_num,
                         stats.total_pages_num, stats.compressed_bytes_read, stats.io_ns,
                         stats.uncompressed_bytes_read, stats.bytes_read, acquire_tablet_ms,
                         acquire_rowsets_ms, acquire_segments_ms, lookup_row_data_ms);
    return Status::OK();
}
512 | | |
// Serve a PMultiGetRequestV2: each request_block_desc describes one table's
// batch of (file_id, row_id) pairs. Internal-table batches go through the
// Doris-format path; external-table batches through the file-reader path.
// Every block desc gets exactly one (possibly empty) serialized block in the
// response, in request order. Optionally drops the query's id_file_map at the
// end when gc_id_map is set.
Status RowIdStorageReader::read_by_rowids(const PMultiGetRequestV2& request,
                                          PMultiGetResponseV2* response) {
    if (request.request_block_descs_size()) {
        auto tquery_id = ((UniqueId)request.query_id()).to_thrift();
        // todo: use mutableBlock instead of block
        std::vector<Block> result_blocks(request.request_block_descs_size());

        OlapReaderStatistics stats;
        int64_t acquire_tablet_ms = 0;
        int64_t acquire_rowsets_ms = 0;
        int64_t acquire_segments_ms = 0;
        int64_t lookup_row_data_ms = 0;

        int64_t external_init_reader_avg_ms = 0;
        int64_t external_get_block_avg_ms = 0;
        size_t external_scan_range_cnt = 0;

        // Add counters for different file mapping types
        std::unordered_map<FileMappingType, int64_t> file_type_counts;

        auto id_file_map =
                ExecEnv::GetInstance()->get_id_manager()->get_id_file_map(request.query_id());
        // if id_file_map is null, means the BE not have scan range, just return ok
        if (!id_file_map) {
            // padding empty block to response
            for (int i = 0; i < request.request_block_descs_size(); ++i) {
                response->add_blocks();
            }
            return Status::OK();
        }

        for (int i = 0; i < request.request_block_descs_size(); ++i) {
            const auto& request_block_desc = request.request_block_descs(i);
            PMultiGetBlockV2* pblock = response->add_blocks();
            if (request_block_desc.row_id_size() >= 1) {
                // Since this block belongs to the same table, we only need to take the first type for judgment.
                auto first_file_id = request_block_desc.file_id(0);
                auto first_file_mapping = id_file_map->get_file_mapping(first_file_id);
                if (!first_file_mapping) {
                    return Status::InternalError(
                            "Backend:{} file_mapping not found, query_id: {}, file_id: {}",
                            BackendOptions::get_localhost(), print_id(request.query_id()),
                            first_file_id);
                }
                file_type_counts[first_file_mapping->type] += request_block_desc.row_id_size();

                // prepare slots to build block
                std::vector<SlotDescriptor> slots;
                slots.reserve(request_block_desc.slots_size());
                for (const auto& pslot : request_block_desc.slots()) {
                    slots.push_back(SlotDescriptor(pslot));
                }
                // prepare block char vector shrink for char type
                std::vector<size_t> char_type_idx;
                for (int j = 0; j < slots.size(); ++j) {
                    auto slot = slots[j];
                    if (_has_char_type(slot.type())) {
                        char_type_idx.push_back(j);
                    }
                }

                // The readers may throw (e.g. on bad input); convert to Status
                // so the RPC always gets a proper error response.
                try {
                    if (first_file_mapping->type == FileMappingType::INTERNAL) {
                        RETURN_IF_ERROR(read_batch_doris_format_row(
                                request_block_desc, id_file_map, slots, tquery_id, result_blocks[i],
                                stats, &acquire_tablet_ms, &acquire_rowsets_ms,
                                &acquire_segments_ms, &lookup_row_data_ms));
                    } else {
                        RETURN_IF_ERROR(read_batch_external_row(
                                request.wg_id(), request_block_desc, id_file_map, slots,
                                first_file_mapping, tquery_id, result_blocks[i],
                                pblock->mutable_profile(), &external_init_reader_avg_ms,
                                &external_get_block_avg_ms, &external_scan_range_cnt));
                    }
                } catch (const Exception& e) {
                    return Status::Error<false>(e.code(), "Row id fetch failed because {}",
                                                e.what());
                }

                // after read the block, shrink char type block
                result_blocks[i].shrink_char_type_column_suffix_zero(char_type_idx);
            }

            // Serialize even if no rows were requested, so the response keeps
            // one block per request_block_desc, aligned by index.
            [[maybe_unused]] size_t compressed_size = 0;
            [[maybe_unused]] size_t uncompressed_size = 0;
            [[maybe_unused]] int64_t compress_time = 0;
            int be_exec_version = request.has_be_exec_version() ? request.be_exec_version() : 0;
            RETURN_IF_ERROR(result_blocks[i].serialize(
                    be_exec_version, pblock->mutable_block(), &uncompressed_size, &compressed_size,
                    &compress_time, segment_v2::CompressionTypePB::LZ4));
        }

        // Build file type statistics string
        std::string file_type_stats;
        for (const auto& [type, count] : file_type_counts) {
            if (!file_type_stats.empty()) {
                file_type_stats += ", ";
            }
            file_type_stats += fmt::format("{}:{}", type, count);
        }

        LOG(INFO) << "Query stats: "
                  << fmt::format(
                             "query_id:{}, "
                             "Internal table:"
                             "hit_cached_pages:{}, total_pages_read:{}, compressed_bytes_read:{}, "
                             "io_latency:{}ns, uncompressed_bytes_read:{}, bytes_read:{}, "
                             "acquire_tablet_ms:{}, acquire_rowsets_ms:{}, acquire_segments_ms:{}, "
                             "lookup_row_data_ms:{}, file_types:[{}]; "
                             "External table : init_reader_ms:{}, get_block_ms:{}, "
                             "external_scan_range_cnt:{}",
                             print_id(request.query_id()), stats.cached_pages_num,
                             stats.total_pages_num, stats.compressed_bytes_read, stats.io_ns,
                             stats.uncompressed_bytes_read, stats.bytes_read, acquire_tablet_ms,
                             acquire_rowsets_ms, acquire_segments_ms, lookup_row_data_ms,
                             file_type_stats, external_init_reader_avg_ms,
                             external_get_block_avg_ms, external_scan_range_cnt);
    }

    if (request.has_gc_id_map() && request.gc_id_map()) {
        ExecEnv::GetInstance()->get_id_manager()->remove_id_file_map(request.query_id());
    }

    return Status::OK();
}
638 | | |
// Read one table's batch of rows from Doris-format (internal) storage into
// `result_block`. Consecutive entries sharing the same file_id are grouped
// into a single run so read_doris_format_row can fetch them together.
// Timing counters are accumulated into the caller-provided pointers.
Status RowIdStorageReader::read_batch_doris_format_row(
        const PRequestBlockDesc& request_block_desc, std::shared_ptr<IdFileMap> id_file_map,
        std::vector<SlotDescriptor>& slots, const TUniqueId& query_id, Block& result_block,
        OlapReaderStatistics& stats, int64_t* acquire_tablet_ms, int64_t* acquire_rowsets_ms,
        int64_t* acquire_segments_ms, int64_t* lookup_row_data_ms) {
    if (result_block.is_empty_column()) [[likely]] {
        result_block = Block(slots, request_block_desc.row_id_size());
    }
    TabletSchema full_read_schema;
    for (const ColumnPB& column_pb : request_block_desc.column_descs()) {
        full_read_schema.append_column(TabletColumn(column_pb));
    }

    // Caches shared across the whole batch so repeated hits on the same
    // segment/column reuse iterators and storage handles.
    std::unordered_map<IteratorKey, IteratorItem, HashOfIteratorKey> iterator_map;
    std::unordered_map<SegKey, SegItem, HashOfSegKey> seg_map;
    std::string row_store_buffer;
    RowStoreReadStruct row_store_read_struct(row_store_buffer);
    // Pre-build serde/uid-index/default-value tables for the row-store path.
    if (request_block_desc.fetch_row_store()) {
        for (int i = 0; i < request_block_desc.slots_size(); ++i) {
            row_store_read_struct.serdes.emplace_back(slots[i].get_data_type_ptr()->get_serde());
            row_store_read_struct.col_uid_to_idx[slots[i].col_unique_id()] = i;
            row_store_read_struct.default_values.emplace_back(slots[i].col_default_value());
        }
    }

    std::vector<uint32_t> row_ids;
    int k = 1;
    // NOTE(review): max_k tracks the longest same-file run but is never read
    // afterwards — presumably left over from debugging; confirm before removing.
    auto max_k = 0;
    for (int j = 0; j < request_block_desc.row_id_size();) {
        auto file_id = request_block_desc.file_id(j);
        row_ids.emplace_back(request_block_desc.row_id(j));
        auto file_mapping = id_file_map->get_file_mapping(file_id);
        if (!file_mapping) {
            return Status::InternalError(
                    "Backend:{} file_mapping not found, query_id: {}, file_id: {}",
                    BackendOptions::get_localhost(), print_id(query_id), file_id);
        }
        // Extend the run while following entries reference the same file_id.
        for (k = 1; j + k < request_block_desc.row_id_size(); ++k) {
            if (request_block_desc.file_id(j + k) == file_id) {
                row_ids.emplace_back(request_block_desc.row_id(j + k));
            } else {
                break;
            }
        }

        RETURN_IF_ERROR(read_doris_format_row(
                id_file_map, file_mapping, row_ids, slots, full_read_schema, row_store_read_struct,
                stats, acquire_tablet_ms, acquire_rowsets_ms, acquire_segments_ms,
                lookup_row_data_ms, seg_map, iterator_map, result_block));

        j += k;
        max_k = std::max(max_k, k);
        row_ids.clear();
    }

    return Status::OK();
}
696 | | |
// Profile info-string keys reported for external (non-Doris-format) row-id fetches;
// written by read_batch_external_row() below.
const std::string RowIdStorageReader::ScannersRunningTimeProfile = "ScannersRunningTime";
const std::string RowIdStorageReader::InitReaderAvgTimeProfile = "InitReaderAvgTime";
const std::string RowIdStorageReader::GetBlockAvgTimeProfile = "GetBlockAvgTime";
const std::string RowIdStorageReader::FileReadLinesProfile = "FileReadLines";
701 | | |
702 | | Status RowIdStorageReader::read_external_row_from_file_mapping( |
703 | | size_t idx, const std::multimap<segment_v2::rowid_t, size_t>& row_ids, |
704 | | const std::shared_ptr<FileMapping>& file_mapping, const std::vector<SlotDescriptor>& slots, |
705 | | const TUniqueId& query_id, const std::shared_ptr<RuntimeState>& runtime_state, |
706 | | std::vector<Block>& scan_blocks, std::vector<std::pair<size_t, size_t>>& row_id_block_idx, |
707 | | std::vector<RowIdStorageReader::ExternalFetchStatistics>& fetch_statistics, |
708 | | const TFileScanRangeParams& rpc_scan_params, |
709 | | const std::unordered_map<std::string, int>& colname_to_slot_id, |
710 | | std::atomic<int>& producer_count, size_t scan_rows_count, |
711 | | std::counting_semaphore<>& semaphore, std::condition_variable& cv, std::mutex& mtx, |
712 | 4.20k | TupleDescriptor& tuple_desc) { |
713 | 4.20k | SCOPED_ATTACH_TASK(ExecEnv::GetInstance()->rowid_storage_reader_tracker()); |
714 | 4.20k | signal::set_signal_task_id(query_id); |
715 | | |
716 | 4.20k | std::list<int64_t> read_ids; |
717 | | //Generate an ordered list with the help of the orderliness of the map. |
718 | 16.1k | for (const auto& [row_id, result_block_idx] : row_ids) { |
719 | 16.1k | if (read_ids.empty() || read_ids.back() != row_id) { |
720 | 14.0k | read_ids.emplace_back(row_id); |
721 | 14.0k | } |
722 | 16.1k | row_id_block_idx[result_block_idx] = std::make_pair(idx, read_ids.size() - 1); |
723 | 16.1k | } |
724 | | |
725 | 4.20k | scan_blocks[idx] = Block(slots, read_ids.size()); |
726 | | |
727 | 4.20k | auto& external_info = file_mapping->get_external_file_info(); |
728 | 4.20k | auto& scan_range_desc = external_info.scan_range_desc; |
729 | | |
730 | | // Clear to avoid reading iceberg position delete file... |
731 | 4.20k | scan_range_desc.table_format_params.iceberg_params = TIcebergFileDesc {}; |
732 | | |
733 | | // Clear to avoid reading hive transactional delete delta file... |
734 | 4.20k | scan_range_desc.table_format_params.transactional_hive_params = TTransactionalHiveDesc {}; |
735 | | |
736 | 4.20k | std::unique_ptr<RuntimeProfile> sub_runtime_profile = |
737 | 4.20k | std::make_unique<RuntimeProfile>("ExternalRowIDFetcher"); |
738 | 4.20k | { |
739 | 4.20k | std::unique_ptr<FileScanner> vfile_scanner_ptr = |
740 | 4.20k | FileScanner::create_unique(runtime_state.get(), sub_runtime_profile.get(), |
741 | 4.20k | &rpc_scan_params, &colname_to_slot_id, &tuple_desc); |
742 | | |
743 | 4.20k | RETURN_IF_ERROR(vfile_scanner_ptr->prepare_for_read_lines(scan_range_desc)); |
744 | 4.20k | RETURN_IF_ERROR(vfile_scanner_ptr->read_lines_from_range( |
745 | 4.20k | scan_range_desc, read_ids, &scan_blocks[idx], external_info, |
746 | 4.20k | &fetch_statistics[idx].init_reader_ms, &fetch_statistics[idx].get_block_ms)); |
747 | 4.20k | } |
748 | | |
749 | 4.20k | auto file_read_bytes_counter = |
750 | 4.20k | sub_runtime_profile->get_counter(FileScanner::FileReadBytesProfile); |
751 | | |
752 | 4.20k | if (file_read_bytes_counter != nullptr) { |
753 | 4.20k | fetch_statistics[idx].file_read_bytes = PrettyPrinter::print( |
754 | 4.20k | file_read_bytes_counter->value(), file_read_bytes_counter->type()); |
755 | 4.20k | } |
756 | | |
757 | 4.20k | auto file_read_times_counter = |
758 | 4.20k | sub_runtime_profile->get_counter(FileScanner::FileReadTimeProfile); |
759 | 4.20k | if (file_read_times_counter != nullptr) { |
760 | 4.20k | fetch_statistics[idx].file_read_times = PrettyPrinter::print( |
761 | 4.20k | file_read_times_counter->value(), file_read_times_counter->type()); |
762 | 4.20k | } |
763 | | |
764 | 4.20k | semaphore.release(); |
765 | 4.20k | if (++producer_count == scan_rows_count) { |
766 | 2.03k | std::lock_guard<std::mutex> lock(mtx); |
767 | 2.03k | cv.notify_one(); |
768 | 2.03k | } |
769 | 4.20k | return Status::OK(); |
770 | 4.20k | } |
771 | | |
772 | | Status RowIdStorageReader::read_batch_external_row( |
773 | | const uint64_t workload_group_id, const PRequestBlockDesc& request_block_desc, |
774 | | std::shared_ptr<IdFileMap> id_file_map, std::vector<SlotDescriptor>& slots, |
775 | | std::shared_ptr<FileMapping> first_file_mapping, const TUniqueId& query_id, |
776 | | Block& result_block, PRuntimeProfileTree* pprofile, int64_t* init_reader_avg_ms, |
777 | 2.03k | int64_t* get_block_avg_ms, size_t* scan_range_cnt) { |
778 | 2.03k | TFileScanRangeParams rpc_scan_params; |
779 | 2.03k | TupleDescriptor tuple_desc(request_block_desc.desc(), false); |
780 | 2.03k | std::unordered_map<std::string, int> colname_to_slot_id; |
781 | 2.03k | std::shared_ptr<RuntimeState> runtime_state = nullptr; |
782 | | |
783 | 2.03k | int max_file_scanners = 0; |
784 | 2.03k | { |
785 | 2.03k | if (result_block.is_empty_column()) [[likely]] { |
786 | 2.03k | result_block = Block(slots, request_block_desc.row_id_size()); |
787 | 2.03k | } |
788 | | |
789 | 2.03k | auto& external_info = first_file_mapping->get_external_file_info(); |
790 | 2.03k | int plan_node_id = external_info.plan_node_id; |
791 | 2.03k | const auto& first_scan_range_desc = external_info.scan_range_desc; |
792 | | |
793 | 2.03k | DCHECK(id_file_map->get_external_scan_params().contains(plan_node_id)); |
794 | 2.03k | const auto* old_scan_params = &(id_file_map->get_external_scan_params().at(plan_node_id)); |
795 | 2.03k | rpc_scan_params = *old_scan_params; |
796 | | |
797 | 2.03k | rpc_scan_params.required_slots.clear(); |
798 | 2.03k | rpc_scan_params.column_idxs.clear(); |
799 | 2.03k | rpc_scan_params.slot_name_to_schema_pos.clear(); |
800 | | |
801 | 2.03k | std::set partition_name_set(first_scan_range_desc.columns_from_path_keys.begin(), |
802 | 2.03k | first_scan_range_desc.columns_from_path_keys.end()); |
803 | 13.7k | for (auto slot_idx = 0; slot_idx < slots.size(); ++slot_idx) { |
804 | 11.7k | auto& slot = slots[slot_idx]; |
805 | 11.7k | tuple_desc.add_slot(&slot); |
806 | 11.7k | colname_to_slot_id.emplace(slot.col_name(), slot.id()); |
807 | 11.7k | TFileScanSlotInfo slot_info; |
808 | 11.7k | slot_info.slot_id = slot.id(); |
809 | 11.7k | auto column_idx = request_block_desc.column_idxs(slot_idx); |
810 | 11.7k | if (partition_name_set.contains(slot.col_name())) { |
811 | | //This is partition column. |
812 | 596 | slot_info.is_file_slot = false; |
813 | 11.1k | } else { |
814 | 11.1k | rpc_scan_params.column_idxs.emplace_back(column_idx); |
815 | 11.1k | slot_info.is_file_slot = true; |
816 | 11.1k | } |
817 | 11.7k | rpc_scan_params.default_value_of_src_slot.emplace(slot.id(), TExpr {}); |
818 | 11.7k | rpc_scan_params.required_slots.emplace_back(slot_info); |
819 | 11.7k | rpc_scan_params.slot_name_to_schema_pos.emplace(slot.col_name(), column_idx); |
820 | 11.7k | } |
821 | | |
822 | 2.03k | const auto& query_options = id_file_map->get_query_options(); |
823 | 2.03k | const auto& query_globals = id_file_map->get_query_globals(); |
824 | | /* |
825 | | * The scan stage needs the information in query_options to generate different behaviors according to the specific variables: |
826 | | * query_options.hive_parquet_use_column_names, query_options.truncate_char_or_varchar_columns,query_globals.time_zone ... |
827 | | * |
828 | | * To ensure the same behavior as the scan stage, I get query_options query_globals from id_file_map, then create runtime_state |
829 | | * and pass it to vfile_scanner so that the runtime_state information is the same as the scan stage and the behavior is also consistent. |
830 | | */ |
831 | 2.03k | runtime_state = RuntimeState::create_shared( |
832 | 2.03k | query_id, -1, query_options, query_globals, ExecEnv::GetInstance(), |
833 | 2.03k | ExecEnv::GetInstance()->rowid_storage_reader_tracker()); |
834 | | |
835 | 2.03k | max_file_scanners = id_file_map->get_max_file_scanners(); |
836 | 2.03k | } |
837 | | |
838 | | // Hash(TFileRangeDesc) => { all the rows that need to be read and their positions in the result block. } + file mapping |
839 | | // std::multimap<segment_v2::rowid_t, size_t> : The reason for using multimap is: may need the same row of data multiple times. |
840 | 2.03k | std::map<std::string, |
841 | 2.03k | std::pair<std::multimap<segment_v2::rowid_t, size_t>, std::shared_ptr<FileMapping>>> |
842 | 2.03k | scan_rows; |
843 | | |
844 | | // Block corresponding to the order of `scan_rows` map. |
845 | 2.03k | std::vector<Block> scan_blocks; |
846 | | |
847 | | // row_id (Indexing of vectors) => < In which block, which line in the block > |
848 | 2.03k | std::vector<std::pair<size_t, size_t>> row_id_block_idx; |
849 | | |
850 | | // Count the time/bytes it takes to read each TFileRangeDesc. (for profile) |
851 | 2.03k | std::vector<ExternalFetchStatistics> fetch_statistics; |
852 | | |
853 | 16.2k | auto hash_file_range = [](const TFileRangeDesc& file_range_desc) { |
854 | 16.2k | std::string value; |
855 | 16.2k | value.resize(file_range_desc.path.size() + sizeof(file_range_desc.start_offset)); |
856 | 16.2k | auto* ptr = value.data(); |
857 | | |
858 | 16.2k | memcpy(ptr, &file_range_desc.start_offset, sizeof(file_range_desc.start_offset)); |
859 | 16.2k | ptr += sizeof(file_range_desc.start_offset); |
860 | 16.2k | memcpy(ptr, file_range_desc.path.data(), file_range_desc.path.size()); |
861 | 16.2k | return value; |
862 | 16.2k | }; |
863 | | |
864 | 18.2k | for (int j = 0; j < request_block_desc.row_id_size(); ++j) { |
865 | 16.2k | auto file_id = request_block_desc.file_id(j); |
866 | 16.2k | auto file_mapping = id_file_map->get_file_mapping(file_id); |
867 | 16.2k | if (!file_mapping) { |
868 | 0 | return Status::InternalError( |
869 | 0 | "Backend:{} file_mapping not found, query_id: {}, file_id: {}", |
870 | 0 | BackendOptions::get_localhost(), print_id(query_id), file_id); |
871 | 0 | } |
872 | | |
873 | 16.2k | const auto& external_info = file_mapping->get_external_file_info(); |
874 | 16.2k | const auto& scan_range_desc = external_info.scan_range_desc; |
875 | | |
876 | 16.2k | auto scan_range_hash = hash_file_range(scan_range_desc); |
877 | 16.2k | if (scan_rows.contains(scan_range_hash)) { |
878 | 11.9k | scan_rows.at(scan_range_hash).first.emplace(request_block_desc.row_id(j), j); |
879 | 11.9k | } else { |
880 | 4.20k | std::multimap<segment_v2::rowid_t, size_t> tmp {{request_block_desc.row_id(j), j}}; |
881 | 4.20k | scan_rows.emplace(scan_range_hash, std::make_pair(tmp, file_mapping)); |
882 | 4.20k | } |
883 | 16.2k | } |
884 | | |
885 | 2.03k | scan_blocks.resize(scan_rows.size()); |
886 | 2.03k | row_id_block_idx.resize(request_block_desc.row_id_size()); |
887 | 2.03k | fetch_statistics.resize(scan_rows.size()); |
888 | | |
889 | | // Get the workload group for subsequent scan task submission. |
890 | 2.03k | std::vector<uint64_t> workload_group_ids; |
891 | 2.03k | workload_group_ids.emplace_back(workload_group_id); |
892 | 2.03k | auto wg = ExecEnv::GetInstance()->workload_group_mgr()->get_group(workload_group_ids); |
893 | 2.03k | doris::TaskScheduler* exec_sched = nullptr; |
894 | 2.03k | ScannerScheduler* scan_sched = nullptr; |
895 | 2.03k | ScannerScheduler* remote_scan_sched = nullptr; |
896 | 2.03k | wg->get_query_scheduler(&exec_sched, &scan_sched, &remote_scan_sched); |
897 | 2.03k | DCHECK(remote_scan_sched); |
898 | | |
899 | 2.03k | int64_t scan_running_time = 0; |
900 | 2.03k | RETURN_IF_ERROR(scope_timer_run( |
901 | 2.03k | [&]() -> Status { |
902 | | // Make sure to insert data into result_block only after all scan tasks have been executed. |
903 | 2.03k | std::atomic<int> producer_count {0}; |
904 | 2.03k | std::condition_variable cv; |
905 | 2.03k | std::mutex mtx; |
906 | | |
907 | | //semaphore: Limit the number of scan tasks submitted at one time |
908 | 2.03k | std::counting_semaphore semaphore {max_file_scanners}; |
909 | | |
910 | 2.03k | size_t idx = 0; |
911 | 2.03k | for (const auto& [_, scan_info] : scan_rows) { |
912 | 2.03k | semaphore.acquire(); |
913 | 2.03k | RETURN_IF_ERROR(remote_scan_sched->submit_scan_task( |
914 | 2.03k | SimplifiedScanTask( |
915 | 2.03k | [&, idx, scan_info]() -> Status { |
916 | 2.03k | const auto& [row_ids, file_mapping] = scan_info; |
917 | 2.03k | return read_external_row_from_file_mapping( |
918 | 2.03k | idx, row_ids, file_mapping, slots, query_id, |
919 | 2.03k | runtime_state, scan_blocks, row_id_block_idx, |
920 | 2.03k | fetch_statistics, rpc_scan_params, |
921 | 2.03k | colname_to_slot_id, producer_count, |
922 | 2.03k | scan_rows.size(), semaphore, cv, mtx, tuple_desc); |
923 | 2.03k | }, |
924 | 2.03k | nullptr, nullptr), |
925 | 2.03k | fmt::format("{}-read_batch_external_row-{}", print_id(query_id), idx))); |
926 | 2.03k | idx++; |
927 | 2.03k | } |
928 | | |
929 | 2.03k | { |
930 | 2.03k | std::unique_lock<std::mutex> lock(mtx); |
931 | 2.03k | cv.wait(lock, [&] { return producer_count == scan_rows.size(); }); |
932 | 2.03k | } |
933 | 2.03k | return Status::OK(); |
934 | 2.03k | }, |
935 | 2.03k | &scan_running_time)); |
936 | | |
937 | | // Insert the read data into result_block. |
938 | 13.7k | for (size_t column_id = 0; column_id < result_block.get_columns().size(); column_id++) { |
939 | | // The non-const Block(result_block) is passed in read_by_rowids, but columns[i] in get_columns |
940 | | // is at bottom an immutable_ptr of Cow<IColumn>, so use const_cast |
941 | 11.7k | auto dst_col = const_cast<IColumn*>(result_block.get_columns()[column_id].get()); |
942 | | |
943 | 11.7k | std::vector<const IColumn*> scan_src_columns; |
944 | 11.7k | scan_src_columns.reserve(row_id_block_idx.size()); |
945 | 11.7k | std::vector<size_t> scan_positions; |
946 | 11.7k | scan_positions.reserve(row_id_block_idx.size()); |
947 | 114k | for (const auto& [pos_block, block_idx] : row_id_block_idx) { |
948 | 114k | DCHECK(scan_blocks.size() > pos_block); |
949 | 114k | DCHECK(scan_blocks[pos_block].get_columns().size() > column_id); |
950 | 114k | scan_src_columns.emplace_back(scan_blocks[pos_block].get_columns()[column_id].get()); |
951 | 114k | scan_positions.emplace_back(block_idx); |
952 | 114k | } |
953 | 11.7k | dst_col->insert_from_multi_column(scan_src_columns, scan_positions); |
954 | 11.7k | } |
955 | | |
956 | | // Statistical runtime profile information. |
957 | 2.03k | std::unique_ptr<RuntimeProfile> runtime_profile = |
958 | 2.03k | std::make_unique<RuntimeProfile>("ExternalRowIDFetcher"); |
959 | 2.03k | { |
960 | 2.03k | runtime_profile->add_info_string(ScannersRunningTimeProfile, |
961 | 2.03k | std::to_string(scan_running_time) + "ms"); |
962 | 2.03k | fmt::memory_buffer file_read_lines_buffer; |
963 | 2.03k | format_to(file_read_lines_buffer, "["); |
964 | 2.03k | fmt::memory_buffer file_read_bytes_buffer; |
965 | 2.03k | format_to(file_read_bytes_buffer, "["); |
966 | 2.03k | fmt::memory_buffer file_read_times_buffer; |
967 | 2.03k | format_to(file_read_times_buffer, "["); |
968 | | |
969 | 2.03k | size_t idx = 0; |
970 | 4.20k | for (const auto& [_, scan_info] : scan_rows) { |
971 | 4.20k | format_to(file_read_lines_buffer, "{}, ", scan_info.first.size()); |
972 | 4.20k | *init_reader_avg_ms = fetch_statistics[idx].init_reader_ms; |
973 | 4.20k | *get_block_avg_ms += fetch_statistics[idx].get_block_ms; |
974 | 4.20k | format_to(file_read_bytes_buffer, "{}, ", fetch_statistics[idx].file_read_bytes); |
975 | 4.20k | format_to(file_read_times_buffer, "{}, ", fetch_statistics[idx].file_read_times); |
976 | 4.20k | idx++; |
977 | 4.20k | } |
978 | | |
979 | 2.03k | format_to(file_read_lines_buffer, "]"); |
980 | 2.03k | format_to(file_read_bytes_buffer, "]"); |
981 | 2.03k | format_to(file_read_times_buffer, "]"); |
982 | | |
983 | 2.03k | *init_reader_avg_ms /= fetch_statistics.size(); |
984 | 2.03k | *get_block_avg_ms /= fetch_statistics.size(); |
985 | 2.03k | runtime_profile->add_info_string(InitReaderAvgTimeProfile, |
986 | 2.03k | std::to_string(*init_reader_avg_ms) + "ms"); |
987 | 2.03k | runtime_profile->add_info_string(GetBlockAvgTimeProfile, |
988 | 2.03k | std::to_string(*init_reader_avg_ms) + "ms"); |
989 | 2.03k | runtime_profile->add_info_string(FileReadLinesProfile, |
990 | 2.03k | fmt::to_string(file_read_lines_buffer)); |
991 | 2.03k | runtime_profile->add_info_string(FileScanner::FileReadBytesProfile, |
992 | 2.03k | fmt::to_string(file_read_bytes_buffer)); |
993 | 2.03k | runtime_profile->add_info_string(FileScanner::FileReadTimeProfile, |
994 | 2.03k | fmt::to_string(file_read_times_buffer)); |
995 | 2.03k | } |
996 | | |
997 | 2.03k | runtime_profile->to_proto(pprofile, 2); |
998 | | |
999 | 2.03k | *scan_range_cnt = scan_rows.size(); |
1000 | | |
1001 | 2.03k | return Status::OK(); |
1002 | 2.03k | } |
1003 | | |
1004 | | Status RowIdStorageReader::read_doris_format_row( |
1005 | | const std::shared_ptr<IdFileMap>& id_file_map, |
1006 | | const std::shared_ptr<FileMapping>& file_mapping, const std::vector<uint32_t>& row_ids, |
1007 | | std::vector<SlotDescriptor>& slots, const TabletSchema& full_read_schema, |
1008 | | RowStoreReadStruct& row_store_read_struct, OlapReaderStatistics& stats, |
1009 | | int64_t* acquire_tablet_ms, int64_t* acquire_rowsets_ms, int64_t* acquire_segments_ms, |
1010 | | int64_t* lookup_row_data_ms, std::unordered_map<SegKey, SegItem, HashOfSegKey>& seg_map, |
1011 | | std::unordered_map<IteratorKey, IteratorItem, HashOfIteratorKey>& iterator_map, |
1012 | 6.83k | Block& result_block) { |
1013 | 6.83k | auto [tablet_id, rowset_id, segment_id] = file_mapping->get_doris_format_info(); |
1014 | 6.83k | SegKey seg_key {.tablet_id = tablet_id, .rowset_id = rowset_id, .segment_id = segment_id}; |
1015 | | |
1016 | 6.83k | BaseTabletSPtr tablet; |
1017 | 6.83k | BetaRowsetSharedPtr rowset; |
1018 | 6.83k | SegmentSharedPtr segment; |
1019 | 6.83k | if (seg_map.find(seg_key) == seg_map.end()) { |
1020 | 2.84k | tablet = scope_timer_run( |
1021 | 2.84k | [&]() { |
1022 | 2.84k | auto res = ExecEnv::get_tablet(tablet_id); |
1023 | 2.84k | return !res.has_value() ? nullptr |
1024 | 2.84k | : std::dynamic_pointer_cast<BaseTablet>(res.value()); |
1025 | 2.84k | }, |
1026 | 2.84k | acquire_tablet_ms); |
1027 | 2.84k | if (!tablet) { |
1028 | 0 | return Status::InternalError( |
1029 | 0 | "Backend:{} tablet not found, tablet_id: {}, rowset_id: {}, segment_id: {}, " |
1030 | 0 | "row_id: {}", |
1031 | 0 | BackendOptions::get_localhost(), tablet_id, rowset_id.to_string(), segment_id, |
1032 | 0 | row_ids[0]); |
1033 | 0 | } |
1034 | | |
1035 | 2.84k | rowset = std::static_pointer_cast<BetaRowset>(scope_timer_run( |
1036 | 2.84k | [&]() { return id_file_map->get_temp_rowset(tablet_id, rowset_id); }, |
1037 | 2.84k | acquire_rowsets_ms)); |
1038 | 2.84k | if (!rowset) { |
1039 | 0 | return Status::InternalError( |
1040 | 0 | "Backend:{} rowset_id not found, tablet_id: {}, rowset_id: {}, segment_id: {}, " |
1041 | 0 | "row_id: {}", |
1042 | 0 | BackendOptions::get_localhost(), tablet_id, rowset_id.to_string(), segment_id, |
1043 | 0 | row_ids[0]); |
1044 | 0 | } |
1045 | | |
1046 | 2.84k | SegmentCacheHandle segment_cache; |
1047 | 2.84k | RETURN_IF_ERROR(scope_timer_run( |
1048 | 2.84k | [&]() { |
1049 | 2.84k | return SegmentLoader::instance()->load_segments(rowset, &segment_cache, true); |
1050 | 2.84k | }, |
1051 | 2.84k | acquire_segments_ms)); |
1052 | | |
1053 | 2.84k | auto it = std::find_if(segment_cache.get_segments().cbegin(), |
1054 | 2.84k | segment_cache.get_segments().cend(), |
1055 | 2.84k | [segment_id](const segment_v2::SegmentSharedPtr& seg) { |
1056 | 2.84k | return seg->id() == segment_id; |
1057 | 2.84k | }); |
1058 | 2.84k | if (it == segment_cache.get_segments().end()) { |
1059 | 0 | return Status::InternalError( |
1060 | 0 | "Backend:{} segment not found, tablet_id: {}, rowset_id: {}, segment_id: {}, " |
1061 | 0 | "row_id: {}", |
1062 | 0 | BackendOptions::get_localhost(), tablet_id, rowset_id.to_string(), segment_id, |
1063 | 0 | row_ids[0]); |
1064 | 0 | } |
1065 | 2.84k | segment = *it; |
1066 | 2.84k | seg_map[seg_key] = SegItem {.tablet = tablet, .rowset = rowset, .segment = segment}; |
1067 | 3.99k | } else { |
1068 | 3.99k | auto& seg_item = seg_map[seg_key]; |
1069 | 3.99k | tablet = seg_item.tablet; |
1070 | 3.99k | rowset = seg_item.rowset; |
1071 | 3.99k | segment = seg_item.segment; |
1072 | 3.99k | } |
1073 | | |
1074 | | // if row_store_read_struct not empty, means the line we should read from row_store |
1075 | 6.83k | if (!row_store_read_struct.default_values.empty()) { |
1076 | 3.37k | if (!tablet->tablet_schema()->has_row_store_for_all_columns()) { |
1077 | 0 | return Status::InternalError("Tablet {} does not have row store for all columns", |
1078 | 0 | tablet->tablet_id()); |
1079 | 0 | } |
1080 | 4.44k | for (auto row_id : row_ids) { |
1081 | 4.44k | RowLocation loc(rowset_id, segment->id(), cast_set<uint32_t>(row_id)); |
1082 | 4.44k | row_store_read_struct.row_store_buffer.clear(); |
1083 | 4.44k | RETURN_IF_ERROR(scope_timer_run( |
1084 | 4.44k | [&]() { |
1085 | 4.44k | return tablet->lookup_row_data({}, loc, rowset, stats, |
1086 | 4.44k | row_store_read_struct.row_store_buffer); |
1087 | 4.44k | }, |
1088 | 4.44k | lookup_row_data_ms)); |
1089 | | |
1090 | 4.44k | RETURN_IF_ERROR(JsonbSerializeUtil::jsonb_to_block( |
1091 | 4.44k | row_store_read_struct.serdes, row_store_read_struct.row_store_buffer.data(), |
1092 | 4.44k | row_store_read_struct.row_store_buffer.size(), |
1093 | 4.44k | row_store_read_struct.col_uid_to_idx, result_block, |
1094 | 4.44k | row_store_read_struct.default_values, {})); |
1095 | 4.44k | } |
1096 | 3.46k | } else { |
1097 | 25.8k | for (int x = 0; x < slots.size(); ++x) { |
1098 | 22.3k | MutableColumnPtr column = result_block.get_by_position(x).column->assume_mutable(); |
1099 | 22.3k | IteratorKey iterator_key {.tablet_id = tablet_id, |
1100 | 22.3k | .rowset_id = rowset_id, |
1101 | 22.3k | .segment_id = segment_id, |
1102 | 22.3k | .slot_id = slots[x].id()}; |
1103 | 22.3k | IteratorItem& iterator_item = iterator_map[iterator_key]; |
1104 | 22.3k | if (iterator_item.segment == nullptr) { |
1105 | 11.8k | iterator_map[iterator_key].segment = segment; |
1106 | 11.8k | iterator_item.storage_read_options.stats = &stats; |
1107 | 11.8k | iterator_item.storage_read_options.io_ctx.reader_type = ReaderType::READER_QUERY; |
1108 | 11.8k | } |
1109 | 28.5k | for (auto row_id : row_ids) { |
1110 | 28.5k | RETURN_IF_ERROR(segment->seek_and_read_by_rowid( |
1111 | 28.5k | full_read_schema, &slots[x], row_id, column, |
1112 | 28.5k | iterator_item.storage_read_options, iterator_item.iterator)); |
1113 | 28.5k | } |
1114 | 22.3k | } |
1115 | 3.46k | } |
1116 | 6.83k | return Status::OK(); |
1117 | 6.83k | } |
1118 | | |
1119 | | #include "common/compile_check_end.h" |
1120 | | |
1121 | | } // namespace doris |