be/src/service/http/action/stream_load.cpp
Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | #include "service/http/action/stream_load.h" |
19 | | |
20 | | // use string iequal |
21 | | #include <event2/buffer.h> |
22 | | #include <event2/http.h> |
23 | | #include <gen_cpp/FrontendService.h> |
24 | | #include <gen_cpp/FrontendService_types.h> |
25 | | #include <gen_cpp/HeartbeatService_types.h> |
26 | | #include <gen_cpp/PaloInternalService_types.h> |
27 | | #include <gen_cpp/PlanNodes_types.h> |
28 | | #include <gen_cpp/Types_types.h> |
29 | | #include <sys/time.h> |
30 | | #include <thrift/protocol/TDebugProtocol.h> |
31 | | |
32 | | #include <algorithm> |
33 | | #include <cstdint> |
34 | | #include <cstdlib> |
35 | | #include <ctime> |
36 | | #include <functional> |
37 | | #include <future> |
38 | | #include <sstream> |
39 | | #include <stdexcept> |
40 | | #include <utility> |
41 | | |
42 | | #include "cloud/config.h" |
43 | | #include "common/config.h" |
44 | | #include "common/consts.h" |
45 | | #include "common/logging.h" |
46 | | #include "common/metrics/doris_metrics.h" |
47 | | #include "common/metrics/metrics.h" |
48 | | #include "common/status.h" |
49 | | #include "common/utils.h" |
50 | | #include "io/fs/stream_load_pipe.h" |
51 | | #include "load/group_commit/group_commit_mgr.h" |
52 | | #include "load/load_path_mgr.h" |
53 | | #include "load/message_body_sink.h" |
54 | | #include "load/stream_load/new_load_stream_mgr.h" |
55 | | #include "load/stream_load/stream_load_context.h" |
56 | | #include "load/stream_load/stream_load_executor.h" |
57 | | #include "load/stream_load/stream_load_recorder.h" |
58 | | #include "runtime/exec_env.h" |
59 | | #include "service/http/http_channel.h" |
60 | | #include "service/http/http_common.h" |
61 | | #include "service/http/http_headers.h" |
62 | | #include "service/http/http_request.h" |
63 | | #include "service/http/utils.h" |
64 | | #include "storage/storage_engine.h" |
65 | | #include "util/byte_buffer.h" |
66 | | #include "util/client_cache.h" |
67 | | #include "util/load_util.h" |
68 | | #include "util/string_util.h" |
69 | | #include "util/thrift_rpc_helper.h" |
70 | | #include "util/time.h" |
71 | | #include "util/uid_util.h" |
72 | | #include "util/url_coding.h" |
73 | | |
74 | | namespace doris { |
75 | | using namespace ErrorCode; |
76 | | |
77 | | DEFINE_COUNTER_METRIC_PROTOTYPE_2ARG(streaming_load_requests_total, MetricUnit::REQUESTS); |
78 | | DEFINE_COUNTER_METRIC_PROTOTYPE_2ARG(streaming_load_duration_ms, MetricUnit::MILLISECONDS); |
79 | | DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(streaming_load_current_processing, MetricUnit::REQUESTS); |
80 | | |
81 | | bvar::LatencyRecorder g_stream_load_receive_data_latency_ms("stream_load_receive_data_latency_ms"); |
82 | | bvar::LatencyRecorder g_stream_load_commit_and_publish_latency_ms("stream_load", |
83 | | "commit_and_publish_ms"); |
84 | | |
85 | | static constexpr size_t MIN_CHUNK_SIZE = 64 * 1024; |
86 | | static const std::string CHUNK = "chunked"; |
87 | | |
88 | | #ifdef BE_TEST |
89 | | TStreamLoadPutResult k_stream_load_put_result; |
90 | | #endif |
91 | | |
92 | 1 | StreamLoadAction::StreamLoadAction(ExecEnv* exec_env) : _exec_env(exec_env) { |
93 | 1 | _stream_load_entity = |
94 | 1 | DorisMetrics::instance()->metric_registry()->register_entity("stream_load"); |
95 | 1 | INT_COUNTER_METRIC_REGISTER(_stream_load_entity, streaming_load_requests_total); |
96 | 1 | INT_COUNTER_METRIC_REGISTER(_stream_load_entity, streaming_load_duration_ms); |
97 | 1 | INT_GAUGE_METRIC_REGISTER(_stream_load_entity, streaming_load_current_processing); |
98 | 1 | } |
99 | | |
100 | 1 | StreamLoadAction::~StreamLoadAction() { |
101 | 1 | DorisMetrics::instance()->metric_registry()->deregister_entity(_stream_load_entity); |
102 | 1 | } |
103 | | |
104 | 0 | void StreamLoadAction::handle(HttpRequest* req) { |
105 | 0 | std::shared_ptr<StreamLoadContext> ctx = |
106 | 0 | std::static_pointer_cast<StreamLoadContext>(req->handler_ctx()); |
107 | 0 | if (ctx == nullptr) { |
108 | 0 | return; |
109 | 0 | } |
110 | | |
111 | 0 | { |
112 | 0 | std::unique_lock<std::mutex> lock1(ctx->_send_reply_lock); |
113 | 0 | ctx->_can_send_reply = true; |
114 | 0 | ctx->_can_send_reply_cv.notify_all(); |
115 | 0 | } |
116 | | |
117 | | // if not ok, the status was already set to failed (e.g. in on_header)
118 | 0 | if (ctx->status.ok()) { |
119 | 0 | ctx->status = _handle(ctx, req); |
120 | 0 | if (!ctx->status.ok() && !ctx->status.is<PUBLISH_TIMEOUT>()) { |
121 | 0 | _send_reply(ctx, req); |
122 | 0 | } |
123 | 0 | } |
124 | 0 | } |
125 | | |
126 | 0 | Status StreamLoadAction::_handle(std::shared_ptr<StreamLoadContext> ctx, HttpRequest* req) { |
127 | 0 | if (ctx->body_bytes > 0 && ctx->receive_bytes != ctx->body_bytes) { |
128 | 0 | LOG(WARNING) << "recevie body don't equal with body bytes, body_bytes=" << ctx->body_bytes |
129 | 0 | << ", receive_bytes=" << ctx->receive_bytes << ", id=" << ctx->id; |
130 | 0 | return Status::Error<ErrorCode::NETWORK_ERROR>("received body does not equal body bytes");
131 | 0 | } |
132 | | |
133 | | // if we use non-streaming, MessageBodyFileSink.finish will close the file |
134 | 0 | RETURN_IF_ERROR(ctx->body_sink->finish()); |
135 | 0 | if (!ctx->use_streaming) { |
136 | | // we need to close file first, then execute_plan_fragment here |
137 | 0 | ctx->body_sink.reset(); |
138 | 0 | TPipelineFragmentParamsList mocked; |
139 | 0 | RETURN_IF_ERROR(_exec_env->stream_load_executor()->execute_plan_fragment( |
140 | 0 | ctx, mocked, |
141 | 0 | [req, this](std::shared_ptr<StreamLoadContext> ctx) { _on_finish(ctx, req); })); |
142 | 0 | } |
143 | | |
144 | 0 | return Status::OK(); |
145 | 0 | } |
146 | | |
147 | 0 | void StreamLoadAction::_on_finish(std::shared_ptr<StreamLoadContext> ctx, HttpRequest* req) { |
148 | 0 | ctx->status = ctx->load_status_future.get(); |
149 | 0 | if (ctx->status.ok()) { |
150 | 0 | if (ctx->group_commit) { |
151 | 0 | LOG(INFO) << "skip commit because this is group commit, pipe_id=" |
152 | 0 | << ctx->id.to_string(); |
153 | 0 | } else if (ctx->two_phase_commit) { |
154 | 0 | int64_t pre_commit_start_time = MonotonicNanos(); |
155 | 0 | ctx->status = _exec_env->stream_load_executor()->pre_commit_txn(ctx.get()); |
156 | 0 | ctx->pre_commit_txn_cost_nanos = MonotonicNanos() - pre_commit_start_time; |
157 | 0 | } else { |
158 | | // If the file was put successfully, we need to commit this load
159 | 0 | int64_t commit_and_publish_start_time = MonotonicNanos(); |
160 | 0 | ctx->status = _exec_env->stream_load_executor()->commit_txn(ctx.get()); |
161 | 0 | ctx->commit_and_publish_txn_cost_nanos = |
162 | 0 | MonotonicNanos() - commit_and_publish_start_time; |
163 | 0 | g_stream_load_commit_and_publish_latency_ms |
164 | 0 | << ctx->commit_and_publish_txn_cost_nanos / 1000000; |
165 | 0 | } |
166 | 0 | } |
167 | 0 | _send_reply(ctx, req); |
168 | 0 | } |
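All three commit paths above follow the same timing pattern: snapshot `MonotonicNanos()` before the call and store the delta after it. `MonotonicNanos()` comes from util/time.h; a steady-clock equivalent, shown only for illustration:

```cpp
#include <chrono>
#include <cstdint>

// Monotonic nanosecond timestamp; unlike wall-clock time it never jumps
// backwards, so the difference of two calls safely measures elapsed time.
inline int64_t monotonic_nanos() {
    return std::chrono::duration_cast<std::chrono::nanoseconds>(
                   std::chrono::steady_clock::now().time_since_epoch())
            .count();
}
```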
169 | | |
170 | 0 | void StreamLoadAction::_send_reply(std::shared_ptr<StreamLoadContext> ctx, HttpRequest* req) { |
171 | 0 | std::unique_lock<std::mutex> lock1(ctx->_send_reply_lock); |
172 | | // 1. _can_send_reply: ensure `send_reply` is invoked only after on_header/handle complete, |
173 | | // avoiding client errors (e.g., broken pipe).
174 | | // 2. _finish_send_reply: Prevent duplicate reply sending; skip reply if HTTP request is canceled |
175 | | // due to long import execution time. |
176 | 0 | while (!ctx->_finish_send_reply && !ctx->_can_send_reply) { |
177 | 0 | ctx->_can_send_reply_cv.wait(lock1); |
178 | 0 | } |
179 | 0 | if (ctx->_finish_send_reply) { |
180 | 0 | return; |
181 | 0 | } |
182 | 0 | DCHECK(ctx->_can_send_reply); |
183 | 0 | ctx->_finish_send_reply = true; |
184 | 0 | ctx->_can_send_reply_cv.notify_all(); |
185 | 0 | ctx->load_cost_millis = UnixMillis() - ctx->start_millis; |
186 | |
187 | 0 | if (!ctx->status.ok() && !ctx->status.is<PUBLISH_TIMEOUT>()) { |
188 | 0 | LOG(WARNING) << "handle streaming load failed, id=" << ctx->id |
189 | 0 | << ", errmsg=" << ctx->status; |
190 | 0 | if (ctx->need_rollback) { |
191 | 0 | _exec_env->stream_load_executor()->rollback_txn(ctx.get()); |
192 | 0 | ctx->need_rollback = false; |
193 | 0 | } |
194 | 0 | if (ctx->body_sink != nullptr) { |
195 | 0 | ctx->body_sink->cancel(ctx->status.to_string()); |
196 | 0 | } |
197 | 0 | } |
198 | |
199 | 0 | auto str = ctx->to_json(); |
200 | | // add new line at end |
201 | 0 | str = str + '\n'; |
202 | |
203 | | #ifndef BE_TEST |
204 | | if (config::enable_stream_load_record || config::enable_stream_load_record_to_audit_log_table) { |
205 | | if (req->header(HTTP_SKIP_RECORD_TO_AUDIT_LOG_TABLE).empty()) { |
206 | | str = ctx->prepare_stream_load_record(str); |
207 | | _save_stream_load_record(ctx, str); |
208 | | } |
209 | | } |
210 | | #endif |
211 | |
212 | 0 | HttpChannel::send_reply(req, str); |
213 | |
214 | 0 | LOG(INFO) << "finished to execute stream load. label=" << ctx->label |
215 | 0 | << ", txn_id=" << ctx->txn_id << ", query_id=" << ctx->id |
216 | 0 | << ", load_cost_ms=" << ctx->load_cost_millis << ", receive_data_cost_ms=" |
217 | 0 | << (ctx->receive_and_read_data_cost_nanos - ctx->read_data_cost_nanos) / 1000000 |
218 | 0 | << ", read_data_cost_ms=" << ctx->read_data_cost_nanos / 1000000 |
219 | 0 | << ", write_data_cost_ms=" << ctx->write_data_cost_nanos / 1000000 |
220 | 0 | << ", commit_and_publish_txn_cost_ms=" |
221 | 0 | << ctx->commit_and_publish_txn_cost_nanos / 1000000 |
222 | 0 | << ", number_total_rows=" << ctx->number_total_rows |
223 | 0 | << ", number_loaded_rows=" << ctx->number_loaded_rows |
224 | 0 | << ", receive_bytes=" << ctx->receive_bytes << ", loaded_bytes=" << ctx->loaded_bytes |
225 | 0 | << ", error_url=" << ctx->error_url; |
226 | | |
227 | | // update statistics |
228 | 0 | streaming_load_requests_total->increment(1); |
229 | 0 | streaming_load_duration_ms->increment(ctx->load_cost_millis); |
230 | 0 | if (!ctx->data_saved_path.empty()) { |
231 | 0 | _exec_env->load_path_mgr()->clean_tmp_files(ctx->data_saved_path); |
232 | 0 | } |
233 | 0 | } |
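The gating at the top of `_send_reply` combines the two flags described in the comments under one mutex, so the reply is sent exactly once and only after `on_header`/`handle` complete. A self-contained sketch of the same handshake; the type and member names are illustrative, not the actual `StreamLoadContext` fields:

```cpp
#include <condition_variable>
#include <mutex>

struct ReplyGate {
    std::mutex mu;
    std::condition_variable cv;
    bool can_send = false;  // set once on_header/handle has completed
    bool finished = false;  // set by whichever path replies first

    void allow() {  // corresponds to setting _can_send_reply
        std::unique_lock<std::mutex> lock(mu);
        can_send = true;
        cv.notify_all();
    }

    bool try_acquire() {  // returns true iff the caller should send the reply
        std::unique_lock<std::mutex> lock(mu);
        cv.wait(lock, [&] { return finished || can_send; });
        if (finished) {
            return false;  // another path already replied; skip the duplicate
        }
        finished = true;
        cv.notify_all();
        return true;
    }
};
```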
234 | | |
235 | 0 | int StreamLoadAction::on_header(HttpRequest* req) { |
236 | 0 | req->mark_send_reply(); |
237 | |
238 | 0 | streaming_load_current_processing->increment(1); |
239 | |
240 | 0 | std::shared_ptr<StreamLoadContext> ctx = std::make_shared<StreamLoadContext>(_exec_env); |
241 | 0 | req->set_handler_ctx(ctx); |
242 | |
243 | 0 | ctx->load_type = TLoadType::MANUL_LOAD; |
244 | 0 | ctx->load_src_type = TLoadSourceType::RAW; |
245 | |
246 | 0 | url_decode(req->param(HTTP_DB_KEY), &ctx->db); |
247 | 0 | url_decode(req->param(HTTP_TABLE_KEY), &ctx->table); |
248 | 0 | ctx->label = req->header(HTTP_LABEL_KEY); |
249 | 0 | ctx->two_phase_commit = req->header(HTTP_TWO_PHASE_COMMIT) == "true"; |
250 | 0 | Status st = _handle_group_commit(req, ctx); |
251 | 0 | if (!ctx->group_commit && ctx->label.empty()) { |
252 | 0 | ctx->label = generate_uuid_string(); |
253 | 0 | } |
254 | |
255 | 0 | LOG(INFO) << "new income streaming load request." << ctx->brief() << ", db=" << ctx->db |
256 | 0 | << ", tbl=" << ctx->table << ", group_commit=" << ctx->group_commit |
257 | 0 | << ", HTTP headers=" << req->get_all_headers(); |
258 | 0 | ctx->begin_receive_and_read_data_cost_nanos = MonotonicNanos(); |
259 | |
260 | 0 | if (st.ok()) { |
261 | 0 | st = _on_header(req, ctx); |
262 | 0 | LOG(INFO) << "finished to handle HTTP header, " << ctx->brief(); |
263 | 0 | } |
264 | 0 | if (!st.ok()) { |
265 | 0 | ctx->status = std::move(st); |
266 | 0 | { |
267 | 0 | std::unique_lock<std::mutex> lock1(ctx->_send_reply_lock); |
268 | 0 | ctx->_can_send_reply = true; |
269 | 0 | ctx->_can_send_reply_cv.notify_all(); |
270 | 0 | } |
271 | 0 | _send_reply(ctx, req); |
272 | 0 | return -1; |
273 | 0 | } |
274 | 0 | return 0; |
275 | 0 | } |
276 | | |
277 | 0 | Status StreamLoadAction::_on_header(HttpRequest* http_req, std::shared_ptr<StreamLoadContext> ctx) { |
278 | | // auth information |
279 | 0 | if (!parse_basic_auth(*http_req, &ctx->auth)) { |
280 | 0 | LOG(WARNING) << "parse basic authorization failed." << ctx->brief(); |
281 | 0 | return Status::NotAuthorized("no valid Basic authorization"); |
282 | 0 | } |
283 | | |
284 | | // get format of this put |
285 | 0 | std::string format_str = http_req->header(HTTP_FORMAT_KEY); |
286 | 0 | if (iequal(format_str, BeConsts::CSV_WITH_NAMES) || |
287 | 0 | iequal(format_str, BeConsts::CSV_WITH_NAMES_AND_TYPES)) { |
288 | 0 | ctx->header_type = format_str; |
289 | | // treat as CSV
290 | 0 | format_str = BeConsts::CSV; |
291 | 0 | } |
292 | 0 | LoadUtil::parse_format(format_str, http_req->header(HTTP_COMPRESS_TYPE), &ctx->format, |
293 | 0 | &ctx->compress_type); |
294 | 0 | if (ctx->format == TFileFormatType::FORMAT_UNKNOWN) { |
295 | 0 | return Status::Error<ErrorCode::DATA_FILE_TYPE_ERROR>("unknown data format, format={}", |
296 | 0 | http_req->header(HTTP_FORMAT_KEY)); |
297 | 0 | } |
298 | | |
299 | | // check content length |
300 | 0 | ctx->body_bytes = 0; |
301 | 0 | size_t csv_max_body_bytes = config::streaming_load_max_mb * 1024 * 1024; |
302 | 0 | size_t json_max_body_bytes = config::streaming_load_json_max_mb * 1024 * 1024; |
303 | 0 | bool read_json_by_line = false; |
304 | 0 | if (!http_req->header(HTTP_READ_JSON_BY_LINE).empty()) { |
305 | 0 | if (iequal(http_req->header(HTTP_READ_JSON_BY_LINE), "true")) { |
306 | 0 | read_json_by_line = true; |
307 | 0 | } |
308 | 0 | } |
309 | 0 | if (!http_req->header(HttpHeaders::CONTENT_LENGTH).empty()) { |
310 | 0 | try { |
311 | 0 | ctx->body_bytes = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH)); |
312 | 0 | } catch (const std::exception& e) { |
313 | 0 | return Status::InvalidArgument("invalid HTTP header CONTENT_LENGTH={}: {}", |
314 | 0 | http_req->header(HttpHeaders::CONTENT_LENGTH), e.what()); |
315 | 0 | } |
316 | | // json max body size |
317 | 0 | if ((ctx->format == TFileFormatType::FORMAT_JSON) && |
318 | 0 | (ctx->body_bytes > json_max_body_bytes) && !read_json_by_line) { |
319 | 0 | return Status::Error<ErrorCode::EXCEEDED_LIMIT>( |
320 | 0 | "json body size {} exceed BE's conf `streaming_load_json_max_mb` {}. increase " |
321 | 0 | "it if you are sure this load is reasonable", |
322 | 0 | ctx->body_bytes, json_max_body_bytes); |
323 | 0 | } |
324 | | // csv max body size |
325 | 0 | else if (ctx->body_bytes > csv_max_body_bytes) { |
326 | 0 | LOG(WARNING) << "body exceed max size." << ctx->brief(); |
327 | 0 | return Status::Error<ErrorCode::EXCEEDED_LIMIT>( |
328 | 0 | "body size {} exceed BE's conf `streaming_load_max_mb` {}. increase it if you " |
329 | 0 | "are sure this load is reasonable", |
330 | 0 | ctx->body_bytes, csv_max_body_bytes); |
331 | 0 | } |
332 | 0 | } else { |
333 | | #ifndef BE_TEST |
334 | | evhttp_connection_set_max_body_size( |
335 | | evhttp_request_get_connection(http_req->get_evhttp_request()), csv_max_body_bytes); |
336 | | #endif |
337 | 0 | } |
338 | | |
339 | 0 | if (!http_req->header(HttpHeaders::TRANSFER_ENCODING).empty()) { |
340 | 0 | if (http_req->header(HttpHeaders::TRANSFER_ENCODING).find(CHUNK) != std::string::npos) { |
341 | 0 | ctx->is_chunked_transfer = true; |
342 | 0 | } |
343 | 0 | } |
344 | 0 | if (UNLIKELY((http_req->header(HttpHeaders::CONTENT_LENGTH).empty() && |
345 | 0 | !ctx->is_chunked_transfer))) { |
346 | 0 | LOG(WARNING) << "content_length is empty and transfer-encoding!=chunked, please set " |
347 | 0 | "content_length or transfer-encoding=chunked"; |
348 | 0 | return Status::InvalidArgument( |
349 | 0 | "content_length is empty and transfer-encoding!=chunked, please set content_length " |
350 | 0 | "or transfer-encoding=chunked"); |
351 | 0 | } else if (UNLIKELY(!http_req->header(HttpHeaders::CONTENT_LENGTH).empty() && |
352 | 0 | ctx->is_chunked_transfer)) { |
353 | 0 | LOG(WARNING) << "please do not set both content_length and transfer-encoding"; |
354 | 0 | return Status::InvalidArgument( |
355 | 0 | "please do not set both content_length and transfer-encoding"); |
356 | 0 | } |
357 | | |
358 | 0 | if (!http_req->header(HTTP_TIMEOUT).empty()) { |
359 | 0 | ctx->timeout_second = DORIS_TRY(safe_stoi(http_req->header(HTTP_TIMEOUT), HTTP_TIMEOUT)); |
360 | 0 | } |
361 | 0 | if (!http_req->header(HTTP_COMMENT).empty()) { |
362 | 0 | ctx->load_comment = http_req->header(HTTP_COMMENT); |
363 | 0 | } |
364 | | // begin transaction |
365 | 0 | if (!ctx->group_commit) { |
366 | 0 | int64_t begin_txn_start_time = MonotonicNanos(); |
367 | 0 | RETURN_IF_ERROR(_exec_env->stream_load_executor()->begin_txn(ctx.get())); |
368 | 0 | ctx->begin_txn_cost_nanos = MonotonicNanos() - begin_txn_start_time; |
369 | 0 | } |
370 | | |
371 | | // process put file |
372 | 0 | return _process_put(http_req, ctx); |
373 | 0 | } |
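Among the header checks above, the body-framing rule deserves emphasis: a request must carry exactly one of `Content-Length` or `Transfer-Encoding: chunked`. A condensed restatement as a hypothetical predicate (not part of Doris):

```cpp
#include <optional>
#include <string>

// Exactly one framing mechanism must be present, mirroring _on_header:
// neither header means the body length is unknowable; both means it is ambiguous.
bool has_valid_framing(const std::optional<std::string>& content_length,
                       bool is_chunked_transfer) {
    if (!content_length && !is_chunked_transfer) return false;
    if (content_length && is_chunked_transfer) return false;
    return true;
}
```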
374 | | |
375 | 0 | void StreamLoadAction::on_chunk_data(HttpRequest* req) { |
376 | 0 | std::shared_ptr<StreamLoadContext> ctx = |
377 | 0 | std::static_pointer_cast<StreamLoadContext>(req->handler_ctx()); |
378 | 0 | if (ctx == nullptr || !ctx->status.ok()) { |
379 | 0 | return; |
380 | 0 | } |
381 | | |
382 | 0 | struct evhttp_request* ev_req = req->get_evhttp_request(); |
383 | 0 | auto evbuf = evhttp_request_get_input_buffer(ev_req); |
384 | |
385 | 0 | SCOPED_ATTACH_TASK(ExecEnv::GetInstance()->stream_load_pipe_tracker()); |
386 | |
387 | 0 | int64_t start_read_data_time = MonotonicNanos(); |
388 | 0 | while (evbuffer_get_length(evbuf) > 0) { |
389 | 0 | ByteBufferPtr bb; |
390 | 0 | Status st = ByteBuffer::allocate(128 * 1024, &bb); |
391 | 0 | if (!st.ok()) { |
392 | 0 | ctx->status = st; |
393 | 0 | return; |
394 | 0 | } |
395 | 0 | auto remove_bytes = evbuffer_remove(evbuf, bb->ptr, bb->capacity); |
396 | 0 | bb->pos = remove_bytes; |
397 | 0 | bb->flip(); |
398 | 0 | st = ctx->body_sink->append(bb); |
399 | 0 | if (!st.ok()) { |
400 | 0 | LOG(WARNING) << "append body content failed. errmsg=" << st << ", " << ctx->brief(); |
401 | 0 | ctx->status = st; |
402 | 0 | return; |
403 | 0 | } |
404 | 0 | ctx->receive_bytes += remove_bytes; |
405 | 0 | } |
406 | 0 | int64_t read_data_time = MonotonicNanos() - start_read_data_time; |
407 | 0 | int64_t last_receive_and_read_data_cost_nanos = ctx->receive_and_read_data_cost_nanos; |
408 | 0 | ctx->read_data_cost_nanos += read_data_time; |
409 | 0 | ctx->receive_and_read_data_cost_nanos = |
410 | 0 | MonotonicNanos() - ctx->begin_receive_and_read_data_cost_nanos; |
411 | 0 | g_stream_load_receive_data_latency_ms |
412 | 0 | << (ctx->receive_and_read_data_cost_nanos - last_receive_and_read_data_cost_nanos - |
413 | 0 | read_data_time) / |
414 | 0 | 1000000; |
415 | 0 | } |
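The loop above drains the libevent input buffer into 128 KB `ByteBuffer`s, setting `pos` to the bytes written and calling `flip()` before appending to the sink. A toy illustration of that write-then-read discipline (not Doris's actual `ByteBuffer` from util/byte_buffer.h):

```cpp
#include <cstddef>
#include <cstring>

// NIO-style buffer: while filling, pos is the write offset; flip() turns
// [0, pos) into the readable region so a consumer reads exactly what was written.
struct ToyBuffer {
    char data[128 * 1024];
    size_t pos = 0;               // write offset while filling
    size_t limit = sizeof(data);  // capacity while filling; end of data after flip

    size_t fill(const char* src, size_t n) {  // counterpart of evbuffer_remove
        size_t copied = n < (limit - pos) ? n : (limit - pos);
        memcpy(data + pos, src, copied);
        pos += copied;
        return copied;
    }
    void flip() {  // switch from writing to reading
        limit = pos;
        pos = 0;
    }
};
```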
416 | | |
417 | 0 | void StreamLoadAction::free_handler_ctx(std::shared_ptr<void> param) { |
418 | 0 | std::shared_ptr<StreamLoadContext> ctx = std::static_pointer_cast<StreamLoadContext>(param); |
419 | 0 | if (ctx == nullptr) { |
420 | 0 | return; |
421 | 0 | } |
422 | | // sender is gone, make receiver know it |
423 | 0 | if (ctx->body_sink != nullptr) { |
424 | 0 | ctx->body_sink->cancel("sender is gone"); |
425 | 0 | } |
426 | | // remove stream load context from stream load manager and the resource will be released |
427 | 0 | ctx->exec_env()->new_load_stream_mgr()->remove(ctx->id); |
428 | 0 | streaming_load_current_processing->increment(-1); |
429 | 0 | } |
430 | | |
431 | | Status StreamLoadAction::_process_put(HttpRequest* http_req, |
432 | 0 | std::shared_ptr<StreamLoadContext> ctx) { |
433 | | // Decide whether this format can be streamed
434 | 0 | ctx->use_streaming = LoadUtil::is_format_support_streaming(ctx->format); |
435 | | |
436 | | // put request |
437 | 0 | TStreamLoadPutRequest request; |
438 | 0 | set_request_auth(&request, ctx->auth); |
439 | 0 | request.db = ctx->db; |
440 | 0 | request.tbl = ctx->table; |
441 | 0 | request.txnId = ctx->txn_id; |
442 | 0 | request.formatType = ctx->format; |
443 | 0 | request.__set_compress_type(ctx->compress_type); |
444 | 0 | request.__set_header_type(ctx->header_type); |
445 | 0 | request.__set_loadId(ctx->id.to_thrift()); |
446 | 0 | if (ctx->use_streaming) { |
447 | 0 | std::shared_ptr<io::StreamLoadPipe> pipe; |
448 | 0 | if (ctx->is_chunked_transfer) { |
449 | 0 | pipe = std::make_shared<io::StreamLoadPipe>( |
450 | 0 | io::kMaxPipeBufferedBytes /* max_buffered_bytes */); |
451 | 0 | pipe->set_is_chunked_transfer(true); |
452 | 0 | } else { |
453 | 0 | pipe = std::make_shared<io::StreamLoadPipe>( |
454 | 0 | io::kMaxPipeBufferedBytes /* max_buffered_bytes */, |
455 | 0 | MIN_CHUNK_SIZE /* min_chunk_size */, ctx->body_bytes /* total_length */); |
456 | 0 | } |
457 | 0 | request.fileType = TFileType::FILE_STREAM; |
458 | 0 | ctx->body_sink = pipe; |
459 | 0 | ctx->pipe = pipe; |
460 | 0 | RETURN_IF_ERROR(_exec_env->new_load_stream_mgr()->put(ctx->id, ctx)); |
461 | 0 | } else { |
462 | 0 | RETURN_IF_ERROR(_data_saved_path(http_req, &request.path, ctx->body_bytes)); |
463 | 0 | auto file_sink = std::make_shared<MessageBodyFileSink>(request.path); |
464 | 0 | RETURN_IF_ERROR(file_sink->open()); |
465 | 0 | request.__isset.path = true; |
466 | 0 | request.fileType = TFileType::FILE_LOCAL; |
467 | 0 | request.__set_file_size(ctx->body_bytes); |
468 | 0 | ctx->body_sink = file_sink; |
469 | 0 | ctx->data_saved_path = request.path; |
470 | 0 | } |
471 | 0 | if (!http_req->header(HTTP_COLUMNS).empty()) { |
472 | 0 | request.__set_columns(http_req->header(HTTP_COLUMNS)); |
473 | 0 | } |
474 | 0 | if (!http_req->header(HTTP_WHERE).empty()) { |
475 | 0 | request.__set_where(http_req->header(HTTP_WHERE)); |
476 | 0 | } |
477 | 0 | if (!http_req->header(HTTP_COLUMN_SEPARATOR).empty()) { |
478 | 0 | request.__set_columnSeparator(http_req->header(HTTP_COLUMN_SEPARATOR)); |
479 | 0 | } |
480 | 0 | if (!http_req->header(HTTP_LINE_DELIMITER).empty()) { |
481 | 0 | request.__set_line_delimiter(http_req->header(HTTP_LINE_DELIMITER)); |
482 | 0 | } |
483 | 0 | if (!http_req->header(HTTP_ENCLOSE).empty()) {
484 | 0 | const auto& enclose_str = http_req->header(HTTP_ENCLOSE); |
485 | 0 | if (enclose_str.length() != 1) { |
486 | 0 | return Status::InvalidArgument("enclose must be single-char, actually is {}", |
487 | 0 | enclose_str); |
488 | 0 | } |
489 | 0 | request.__set_enclose(http_req->header(HTTP_ENCLOSE)[0]); |
490 | 0 | } |
491 | 0 | if (!http_req->header(HTTP_ESCAPE).empty()) {
492 | 0 | const auto& escape_str = http_req->header(HTTP_ESCAPE); |
493 | 0 | if (escape_str.length() != 1) { |
494 | 0 | return Status::InvalidArgument("escape must be single-char, actually is {}", |
495 | 0 | escape_str); |
496 | 0 | } |
497 | 0 | request.__set_escape(http_req->header(HTTP_ESCAPE)[0]); |
498 | 0 | } |
499 | 0 | if (!http_req->header(HTTP_PARTITIONS).empty()) { |
500 | 0 | request.__set_partitions(http_req->header(HTTP_PARTITIONS)); |
501 | 0 | request.__set_isTempPartition(false); |
502 | 0 | if (!http_req->header(HTTP_TEMP_PARTITIONS).empty()) { |
503 | 0 | return Status::InvalidArgument( |
504 | 0 | "Can not specify both partitions and temporary partitions"); |
505 | 0 | } |
506 | 0 | } |
507 | 0 | if (!http_req->header(HTTP_TEMP_PARTITIONS).empty()) { |
508 | 0 | request.__set_partitions(http_req->header(HTTP_TEMP_PARTITIONS)); |
509 | 0 | request.__set_isTempPartition(true); |
510 | 0 | if (!http_req->header(HTTP_PARTITIONS).empty()) { |
511 | 0 | return Status::InvalidArgument( |
512 | 0 | "Can not specify both partitions and temporary partitions"); |
513 | 0 | } |
514 | 0 | } |
515 | 0 | if (!http_req->header(HTTP_NEGATIVE).empty() && http_req->header(HTTP_NEGATIVE) == "true") { |
516 | 0 | request.__set_negative(true); |
517 | 0 | } else { |
518 | 0 | request.__set_negative(false); |
519 | 0 | } |
520 | 0 | bool strictMode = false; |
521 | 0 | if (!http_req->header(HTTP_STRICT_MODE).empty()) { |
522 | 0 | if (iequal(http_req->header(HTTP_STRICT_MODE), "false")) { |
523 | 0 | strictMode = false; |
524 | 0 | } else if (iequal(http_req->header(HTTP_STRICT_MODE), "true")) { |
525 | 0 | strictMode = true; |
526 | 0 | } else { |
527 | 0 | return Status::InvalidArgument("Invalid strict mode format. Must be bool type"); |
528 | 0 | } |
529 | 0 | request.__set_strictMode(strictMode); |
530 | 0 | } |
531 | | // prefer the 'timezone' header; if absent, fall back to the 'time_zone' header
532 | 0 | if (!http_req->header(HTTP_TIMEZONE).empty()) { |
533 | 0 | request.__set_timezone(http_req->header(HTTP_TIMEZONE)); |
534 | 0 | } else if (!http_req->header(HTTP_TIME_ZONE).empty()) { |
535 | 0 | request.__set_timezone(http_req->header(HTTP_TIME_ZONE)); |
536 | 0 | } |
537 | 0 | if (!http_req->header(HTTP_EXEC_MEM_LIMIT).empty()) { |
538 | 0 | try { |
539 | 0 | request.__set_execMemLimit(std::stoll(http_req->header(HTTP_EXEC_MEM_LIMIT))); |
540 | 0 | } catch (const std::exception& e) {
541 | 0 | return Status::InvalidArgument("Invalid mem limit format, {}", e.what()); |
542 | 0 | } |
543 | 0 | } |
544 | 0 | if (!http_req->header(HTTP_JSONPATHS).empty()) { |
545 | 0 | request.__set_jsonpaths(http_req->header(HTTP_JSONPATHS)); |
546 | 0 | } |
547 | 0 | if (!http_req->header(HTTP_JSONROOT).empty()) { |
548 | 0 | request.__set_json_root(http_req->header(HTTP_JSONROOT)); |
549 | 0 | } |
550 | 0 | if (!http_req->header(HTTP_STRIP_OUTER_ARRAY).empty()) { |
551 | 0 | if (iequal(http_req->header(HTTP_STRIP_OUTER_ARRAY), "true")) { |
552 | 0 | request.__set_strip_outer_array(true); |
553 | 0 | } else { |
554 | 0 | request.__set_strip_outer_array(false); |
555 | 0 | } |
556 | 0 | } else { |
557 | 0 | request.__set_strip_outer_array(false); |
558 | 0 | } |
559 | |
560 | 0 | if (!http_req->header(HTTP_READ_JSON_BY_LINE).empty()) { |
561 | 0 | if (iequal(http_req->header(HTTP_READ_JSON_BY_LINE), "true")) { |
562 | 0 | request.__set_read_json_by_line(true); |
563 | 0 | } else { |
564 | 0 | request.__set_read_json_by_line(false); |
565 | 0 | } |
566 | 0 | } else { |
567 | 0 | request.__set_read_json_by_line(false); |
568 | 0 | } |
569 | |
570 | 0 | if (http_req->header(HTTP_READ_JSON_BY_LINE).empty() && |
571 | 0 | http_req->header(HTTP_STRIP_OUTER_ARRAY).empty()) { |
572 | 0 | request.__set_read_json_by_line(true); |
573 | 0 | request.__set_strip_outer_array(false); |
574 | 0 | } |
575 | |
576 | 0 | if (!http_req->header(HTTP_NUM_AS_STRING).empty()) { |
577 | 0 | if (iequal(http_req->header(HTTP_NUM_AS_STRING), "true")) { |
578 | 0 | request.__set_num_as_string(true); |
579 | 0 | } else { |
580 | 0 | request.__set_num_as_string(false); |
581 | 0 | } |
582 | 0 | } else { |
583 | 0 | request.__set_num_as_string(false); |
584 | 0 | } |
585 | 0 | if (!http_req->header(HTTP_FUZZY_PARSE).empty()) { |
586 | 0 | if (iequal(http_req->header(HTTP_FUZZY_PARSE), "true")) { |
587 | 0 | request.__set_fuzzy_parse(true); |
588 | 0 | } else { |
589 | 0 | request.__set_fuzzy_parse(false); |
590 | 0 | } |
591 | 0 | } else { |
592 | 0 | request.__set_fuzzy_parse(false); |
593 | 0 | } |
594 | |
595 | 0 | if (!http_req->header(HTTP_FUNCTION_COLUMN + "." + HTTP_SEQUENCE_COL).empty()) { |
596 | 0 | request.__set_sequence_col( |
597 | 0 | http_req->header(HTTP_FUNCTION_COLUMN + "." + HTTP_SEQUENCE_COL)); |
598 | 0 | } |
599 | |
600 | 0 | if (!http_req->header(HTTP_SEND_BATCH_PARALLELISM).empty()) { |
601 | 0 | int parallelism = DORIS_TRY(safe_stoi(http_req->header(HTTP_SEND_BATCH_PARALLELISM), |
602 | 0 | HTTP_SEND_BATCH_PARALLELISM)); |
603 | 0 | request.__set_send_batch_parallelism(parallelism); |
604 | 0 | } |
605 | | |
606 | 0 | if (!http_req->header(HTTP_LOAD_TO_SINGLE_TABLET).empty()) { |
607 | 0 | if (iequal(http_req->header(HTTP_LOAD_TO_SINGLE_TABLET), "true")) { |
608 | 0 | request.__set_load_to_single_tablet(true); |
609 | 0 | } else { |
610 | 0 | request.__set_load_to_single_tablet(false); |
611 | 0 | } |
612 | 0 | } |
613 | |
614 | 0 | if (ctx->timeout_second != -1) { |
615 | 0 | request.__set_timeout(ctx->timeout_second); |
616 | 0 | } |
617 | 0 | request.__set_thrift_rpc_timeout_ms(config::thrift_rpc_timeout_ms); |
618 | 0 | TMergeType::type merge_type = TMergeType::APPEND; |
619 | 0 | StringCaseMap<TMergeType::type> merge_type_map = {{"APPEND", TMergeType::APPEND}, |
620 | 0 | {"DELETE", TMergeType::DELETE}, |
621 | 0 | {"MERGE", TMergeType::MERGE}}; |
622 | 0 | if (!http_req->header(HTTP_MERGE_TYPE).empty()) { |
623 | 0 | std::string merge_type_str = http_req->header(HTTP_MERGE_TYPE); |
624 | 0 | auto iter = merge_type_map.find(merge_type_str); |
625 | 0 | if (iter != merge_type_map.end()) { |
626 | 0 | merge_type = iter->second; |
627 | 0 | } else { |
628 | 0 | return Status::InvalidArgument("Invalid merge type {}", merge_type_str); |
629 | 0 | } |
630 | 0 | if (merge_type == TMergeType::MERGE && http_req->header(HTTP_DELETE_CONDITION).empty()) { |
631 | 0 | return Status::InvalidArgument("Excepted DELETE ON clause when merge type is MERGE."); |
632 | 0 | } else if (merge_type != TMergeType::MERGE && |
633 | 0 | !http_req->header(HTTP_DELETE_CONDITION).empty()) { |
634 | 0 | return Status::InvalidArgument( |
635 | 0 | "Not support DELETE ON clause when merge type is not MERGE."); |
636 | 0 | } |
637 | 0 | } |
638 | 0 | request.__set_merge_type(merge_type); |
639 | 0 | if (!http_req->header(HTTP_DELETE_CONDITION).empty()) { |
640 | 0 | request.__set_delete_condition(http_req->header(HTTP_DELETE_CONDITION)); |
641 | 0 | } |
642 | |
643 | 0 | if (!http_req->header(HTTP_MAX_FILTER_RATIO).empty()) { |
644 | 0 | ctx->max_filter_ratio = strtod(http_req->header(HTTP_MAX_FILTER_RATIO).c_str(), nullptr); |
645 | 0 | request.__set_max_filter_ratio(ctx->max_filter_ratio); |
646 | 0 | } |
647 | |
648 | 0 | if (!http_req->header(HTTP_HIDDEN_COLUMNS).empty()) { |
649 | 0 | request.__set_hidden_columns(http_req->header(HTTP_HIDDEN_COLUMNS)); |
650 | 0 | } |
651 | 0 | if (!http_req->header(HTTP_TRIM_DOUBLE_QUOTES).empty()) { |
652 | 0 | if (iequal(http_req->header(HTTP_TRIM_DOUBLE_QUOTES), "true")) { |
653 | 0 | request.__set_trim_double_quotes(true); |
654 | 0 | } else { |
655 | 0 | request.__set_trim_double_quotes(false); |
656 | 0 | } |
657 | 0 | } |
658 | 0 | if (!http_req->header(HTTP_SKIP_LINES).empty()) { |
659 | 0 | int skip_lines = DORIS_TRY(safe_stoi(http_req->header(HTTP_SKIP_LINES), HTTP_SKIP_LINES)); |
660 | 0 | if (skip_lines < 0) { |
661 | 0 | return Status::InvalidArgument("Invalid 'skip_lines': {}", skip_lines); |
662 | 0 | } |
663 | 0 | request.__set_skip_lines(skip_lines); |
664 | 0 | } |
665 | 0 | if (!http_req->header(HTTP_ENABLE_PROFILE).empty()) { |
666 | 0 | if (iequal(http_req->header(HTTP_ENABLE_PROFILE), "true")) { |
667 | 0 | request.__set_enable_profile(true); |
668 | 0 | } else { |
669 | 0 | request.__set_enable_profile(false); |
670 | 0 | } |
671 | 0 | } |
672 | |
673 | 0 | if (!http_req->header(HTTP_UNIQUE_KEY_UPDATE_MODE).empty()) { |
674 | 0 | static const StringCaseMap<TUniqueKeyUpdateMode::type> unique_key_update_mode_map = { |
675 | 0 | {"UPSERT", TUniqueKeyUpdateMode::UPSERT}, |
676 | 0 | {"UPDATE_FIXED_COLUMNS", TUniqueKeyUpdateMode::UPDATE_FIXED_COLUMNS}, |
677 | 0 | {"UPDATE_FLEXIBLE_COLUMNS", TUniqueKeyUpdateMode::UPDATE_FLEXIBLE_COLUMNS}}; |
678 | 0 | std::string unique_key_update_mode_str = http_req->header(HTTP_UNIQUE_KEY_UPDATE_MODE); |
679 | 0 | auto iter = unique_key_update_mode_map.find(unique_key_update_mode_str); |
680 | 0 | if (iter != unique_key_update_mode_map.end()) { |
681 | 0 | TUniqueKeyUpdateMode::type unique_key_update_mode = iter->second; |
682 | 0 | if (unique_key_update_mode == TUniqueKeyUpdateMode::UPDATE_FLEXIBLE_COLUMNS) { |
683 | | // check constraints when flexible partial update is enabled |
684 | 0 | if (ctx->format != TFileFormatType::FORMAT_JSON) { |
685 | 0 | return Status::InvalidArgument( |
686 | 0 | "flexible partial update only support json format as input file " |
687 | 0 | "currently"); |
688 | 0 | } |
689 | 0 | if (!http_req->header(HTTP_FUZZY_PARSE).empty() && |
690 | 0 | iequal(http_req->header(HTTP_FUZZY_PARSE), "true")) { |
691 | 0 | return Status::InvalidArgument( |
692 | 0 | "Don't support flexible partial update when 'fuzzy_parse' is enabled"); |
693 | 0 | } |
694 | 0 | if (!http_req->header(HTTP_COLUMNS).empty()) { |
695 | 0 | return Status::InvalidArgument( |
696 | 0 | "Don't support flexible partial update when 'columns' is specified"); |
697 | 0 | } |
698 | 0 | if (!http_req->header(HTTP_JSONPATHS).empty()) { |
699 | 0 | return Status::InvalidArgument( |
700 | 0 | "Don't support flexible partial update when 'jsonpaths' is specified"); |
701 | 0 | } |
702 | 0 | if (!http_req->header(HTTP_HIDDEN_COLUMNS).empty()) { |
703 | 0 | return Status::InvalidArgument( |
704 | 0 | "Don't support flexible partial update when 'hidden_columns' is " |
705 | 0 | "specified"); |
706 | 0 | } |
707 | 0 | if (!http_req->header(HTTP_FUNCTION_COLUMN + "." + HTTP_SEQUENCE_COL).empty()) { |
708 | 0 | return Status::InvalidArgument( |
709 | 0 | "Don't support flexible partial update when " |
710 | 0 | "'function_column.sequence_col' is specified"); |
711 | 0 | } |
712 | 0 | if (!http_req->header(HTTP_MERGE_TYPE).empty()) { |
713 | 0 | return Status::InvalidArgument( |
714 | 0 | "Don't support flexible partial update when " |
715 | 0 | "'merge_type' is specified"); |
716 | 0 | } |
717 | 0 | if (!http_req->header(HTTP_WHERE).empty()) { |
718 | 0 | return Status::InvalidArgument( |
719 | 0 | "Don't support flexible partial update when " |
720 | 0 | "'where' is specified"); |
721 | 0 | } |
722 | 0 | } |
723 | 0 | request.__set_unique_key_update_mode(unique_key_update_mode); |
724 | 0 | } else { |
725 | 0 | return Status::InvalidArgument( |
726 | 0 | "Invalid unique_key_partial_mode {}, must be one of 'UPSERT', " |
727 | 0 | "'UPDATE_FIXED_COLUMNS' or 'UPDATE_FLEXIBLE_COLUMNS'", |
728 | 0 | unique_key_update_mode_str); |
729 | 0 | } |
730 | 0 | } |
731 | | |
732 | 0 | if (http_req->header(HTTP_UNIQUE_KEY_UPDATE_MODE).empty() && |
733 | 0 | !http_req->header(HTTP_PARTIAL_COLUMNS).empty()) { |
734 | | // only consider `partial_columns` parameter when `unique_key_update_mode` is not set |
735 | 0 | if (iequal(http_req->header(HTTP_PARTIAL_COLUMNS), "true")) { |
736 | 0 | request.__set_unique_key_update_mode(TUniqueKeyUpdateMode::UPDATE_FIXED_COLUMNS); |
737 | | // for backward compatibility |
738 | 0 | request.__set_partial_update(true); |
739 | 0 | } |
740 | 0 | } |
741 | |
742 | 0 | if (!http_req->header(HTTP_PARTIAL_UPDATE_NEW_ROW_POLICY).empty()) { |
743 | 0 | static const std::map<std::string, TPartialUpdateNewRowPolicy::type> policy_map { |
744 | 0 | {"APPEND", TPartialUpdateNewRowPolicy::APPEND}, |
745 | 0 | {"ERROR", TPartialUpdateNewRowPolicy::ERROR}}; |
746 | |
747 | 0 | auto policy_name = http_req->header(HTTP_PARTIAL_UPDATE_NEW_ROW_POLICY); |
748 | 0 | std::transform(policy_name.begin(), policy_name.end(), policy_name.begin(), |
749 | 0 | [](unsigned char c) { return std::toupper(c); }); |
750 | 0 | auto it = policy_map.find(policy_name); |
751 | 0 | if (it == policy_map.end()) { |
752 | 0 | return Status::InvalidArgument( |
753 | 0 | "Invalid partial_update_new_key_behavior {}, must be one of {'APPEND', " |
754 | 0 | "'ERROR'}", |
755 | 0 | policy_name); |
756 | 0 | } |
757 | 0 | request.__set_partial_update_new_key_policy(it->second); |
758 | 0 | } |
759 | | |
760 | 0 | if (!http_req->header(HTTP_MEMTABLE_ON_SINKNODE).empty()) { |
761 | 0 | bool value = iequal(http_req->header(HTTP_MEMTABLE_ON_SINKNODE), "true"); |
762 | 0 | request.__set_memtable_on_sink_node(value); |
763 | 0 | } |
764 | 0 | if (!http_req->header(HTTP_LOAD_STREAM_PER_NODE).empty()) { |
765 | 0 | int stream_per_node = DORIS_TRY( |
766 | 0 | safe_stoi(http_req->header(HTTP_LOAD_STREAM_PER_NODE), HTTP_LOAD_STREAM_PER_NODE)); |
767 | 0 | request.__set_stream_per_node(stream_per_node); |
768 | 0 | } |
769 | 0 | if (ctx->group_commit) { |
770 | 0 | if (!http_req->header(HTTP_GROUP_COMMIT).empty()) { |
771 | 0 | request.__set_group_commit_mode(http_req->header(HTTP_GROUP_COMMIT)); |
772 | 0 | } else { |
773 | | // used for wait_internal_group_commit_finish |
774 | 0 | request.__set_group_commit_mode("sync_mode"); |
775 | 0 | } |
776 | 0 | } |
777 | |
778 | 0 | if (!http_req->header(HTTP_COMPUTE_GROUP).empty()) { |
779 | 0 | request.__set_cloud_cluster(http_req->header(HTTP_COMPUTE_GROUP)); |
780 | 0 | } else if (!http_req->header(HTTP_CLOUD_CLUSTER).empty()) { |
781 | 0 | request.__set_cloud_cluster(http_req->header(HTTP_CLOUD_CLUSTER)); |
782 | 0 | } |
783 | |
784 | 0 | if (!http_req->header(HTTP_EMPTY_FIELD_AS_NULL).empty()) { |
785 | 0 | if (iequal(http_req->header(HTTP_EMPTY_FIELD_AS_NULL), "true")) { |
786 | 0 | request.__set_empty_field_as_null(true); |
787 | 0 | } |
788 | 0 | } |
789 | |
790 | | #ifndef BE_TEST |
791 | | // plan this load |
792 | | TNetworkAddress master_addr = _exec_env->cluster_info()->master_fe_addr; |
793 | | int64_t stream_load_put_start_time = MonotonicNanos(); |
794 | | RETURN_IF_ERROR(ThriftRpcHelper::rpc<FrontendServiceClient>( |
795 | | master_addr.hostname, master_addr.port, |
796 | | [&request, ctx](FrontendServiceConnection& client) { |
797 | | client->streamLoadPut(ctx->put_result, request); |
798 | | })); |
799 | | ctx->stream_load_put_cost_nanos = MonotonicNanos() - stream_load_put_start_time; |
800 | | #else |
801 | 0 | ctx->put_result = k_stream_load_put_result; |
802 | 0 | #endif |
803 | 0 | Status plan_status(Status::create(ctx->put_result.status)); |
804 | 0 | if (!plan_status.ok()) { |
805 | 0 | LOG(WARNING) << "plan streaming load failed. errmsg=" << plan_status << ctx->brief(); |
806 | 0 | return plan_status; |
807 | 0 | } |
808 | 0 | DCHECK(ctx->put_result.__isset.pipeline_params); |
809 | 0 | ctx->put_result.pipeline_params.query_options.__set_enable_strict_cast(false); |
810 | 0 | ctx->put_result.pipeline_params.query_options.__set_enable_insert_strict(strictMode); |
811 | 0 | if (config::is_cloud_mode() && ctx->two_phase_commit && ctx->is_mow_table()) { |
812 | 0 | return Status::NotSupported("stream load 2pc is unsupported for mow table"); |
813 | 0 | } |
814 | 0 | if (http_req->header(HTTP_GROUP_COMMIT) == "async_mode") { |
815 | | // FIXME: find a way to avoid chunked stream loads writing large WALs
816 | 0 | size_t content_length = 0; |
817 | 0 | if (!http_req->header(HttpHeaders::CONTENT_LENGTH).empty()) { |
818 | 0 | try { |
819 | 0 | content_length = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH)); |
820 | 0 | } catch (const std::exception& e) { |
821 | 0 | return Status::InvalidArgument("invalid HTTP header CONTENT_LENGTH={}: {}", |
822 | 0 | http_req->header(HttpHeaders::CONTENT_LENGTH), |
823 | 0 | e.what()); |
824 | 0 | } |
825 | 0 | if (ctx->format == TFileFormatType::FORMAT_CSV_GZ || |
826 | 0 | ctx->format == TFileFormatType::FORMAT_CSV_LZO || |
827 | 0 | ctx->format == TFileFormatType::FORMAT_CSV_BZ2 || |
828 | 0 | ctx->format == TFileFormatType::FORMAT_CSV_LZ4FRAME || |
829 | 0 | ctx->format == TFileFormatType::FORMAT_CSV_LZOP || |
830 | 0 | ctx->format == TFileFormatType::FORMAT_CSV_LZ4BLOCK || |
831 | 0 | ctx->format == TFileFormatType::FORMAT_CSV_SNAPPYBLOCK) { |
832 | 0 | content_length *= 3; |
833 | 0 | } |
834 | 0 | } |
835 | 0 | ctx->put_result.pipeline_params.__set_content_length(content_length); |
836 | 0 | } |
837 | | |
838 | 0 | VLOG_NOTICE << "params is " |
839 | 0 | << apache::thrift::ThriftDebugString(ctx->put_result.pipeline_params); |
840 | | // if we do not use streaming, we must download the entire content before we
841 | | // begin to process this load
842 | 0 | if (!ctx->use_streaming) { |
843 | 0 | return Status::OK(); |
844 | 0 | } |
845 | | |
846 | 0 | TPipelineFragmentParamsList mocked; |
847 | 0 | return _exec_env->stream_load_executor()->execute_plan_fragment( |
848 | 0 | ctx, mocked, [http_req, this](std::shared_ptr<StreamLoadContext> ctx) { |
849 | 0 | _on_finish(ctx, http_req); |
850 | 0 | }); |
851 | 0 | } |
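`StringCaseMap` used above provides case-insensitive lookup for header values such as the merge type. A minimal equivalent built on a custom comparator; this is a sketch, not the actual implementation in util/string_util.h:

```cpp
#include <map>
#include <string>
#include <strings.h>  // strcasecmp (POSIX)

// Keys differing only in case compare equal, so find("append") matches an
// entry inserted as "APPEND".
struct CaseInsensitiveLess {
    bool operator()(const std::string& a, const std::string& b) const {
        return strcasecmp(a.c_str(), b.c_str()) < 0;
    }
};
template <typename V>
using CaseInsensitiveMap = std::map<std::string, V, CaseInsensitiveLess>;
```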
852 | | |
853 | | Status StreamLoadAction::_data_saved_path(HttpRequest* req, std::string* file_path, |
854 | 0 | int64_t file_bytes) { |
855 | 0 | std::string prefix; |
856 | 0 | RETURN_IF_ERROR(_exec_env->load_path_mgr()->allocate_dir(req->param(HTTP_DB_KEY), "", &prefix, |
857 | 0 | file_bytes)); |
858 | 0 | timeval tv; |
859 | 0 | gettimeofday(&tv, nullptr); |
860 | 0 | struct tm tm; |
861 | 0 | time_t cur_sec = tv.tv_sec; |
862 | 0 | localtime_r(&cur_sec, &tm); |
863 | 0 | char buf[64]; |
864 | 0 | strftime(buf, 64, "%Y%m%d%H%M%S", &tm); |
865 | 0 | std::stringstream ss; |
866 | 0 | ss << prefix << "/" << req->param(HTTP_TABLE_KEY) << "." << buf << "." << tv.tv_usec; |
867 | 0 | *file_path = ss.str(); |
868 | 0 | return Status::OK(); |
869 | 0 | } |
870 | | |
871 | | void StreamLoadAction::_save_stream_load_record(std::shared_ptr<StreamLoadContext> ctx, |
872 | 0 | const std::string& str) { |
873 | 0 | std::shared_ptr<StreamLoadRecorder> stream_load_recorder = |
874 | 0 | ExecEnv::GetInstance()->storage_engine().get_stream_load_recorder(); |
875 | |
876 | 0 | if (stream_load_recorder != nullptr) { |
877 | 0 | std::string key = |
878 | 0 | std::to_string(ctx->start_millis + ctx->load_cost_millis) + "_" + ctx->label; |
879 | 0 | auto st = stream_load_recorder->put(key, str); |
880 | 0 | if (st.ok()) { |
881 | 0 | LOG(INFO) << "put stream_load_record rocksdb successfully. label: " << ctx->label |
882 | 0 | << ", key: " << key; |
883 | 0 | } |
884 | 0 | } else { |
885 | 0 | LOG(WARNING) << "put stream_load_record rocksdb failed. stream_load_recorder is null."; |
886 | 0 | } |
887 | 0 | } |
888 | | |
889 | | Status StreamLoadAction::_handle_group_commit(HttpRequest* req, |
890 | 0 | std::shared_ptr<StreamLoadContext> ctx) { |
891 | 0 | std::string group_commit_mode = req->header(HTTP_GROUP_COMMIT); |
892 | 0 | if (!group_commit_mode.empty() && !iequal(group_commit_mode, "sync_mode") && |
893 | 0 | !iequal(group_commit_mode, "async_mode") && !iequal(group_commit_mode, "off_mode")) { |
894 | 0 | return Status::InvalidArgument( |
895 | 0 | "group_commit can only be [async_mode, sync_mode, off_mode]"); |
896 | 0 | } |
897 | 0 | if (config::wait_internal_group_commit_finish) { |
898 | 0 | group_commit_mode = "sync_mode"; |
899 | 0 | } |
900 | 0 | int64_t content_length = req->header(HttpHeaders::CONTENT_LENGTH).empty() |
901 | 0 | ? 0 |
902 | 0 | : std::stoll(req->header(HttpHeaders::CONTENT_LENGTH)); |
903 | 0 | if (content_length < 0) { |
904 | 0 | std::stringstream ss; |
905 | 0 | ss << "This stream load content length <0 (" << content_length |
906 | 0 | << "), please check your content length."; |
907 | 0 | LOG(WARNING) << ss.str(); |
908 | 0 | return Status::InvalidArgument(ss.str()); |
909 | 0 | } |
910 | | // allow chunked stream load in Flink
911 | 0 | auto is_chunk = !req->header(HttpHeaders::TRANSFER_ENCODING).empty() && |
912 | 0 | req->header(HttpHeaders::TRANSFER_ENCODING).find(CHUNK) != std::string::npos; |
913 | 0 | if (group_commit_mode.empty() || iequal(group_commit_mode, "off_mode") || |
914 | 0 | (content_length == 0 && !is_chunk)) { |
915 | | // off_mode, no requested mode, or an empty non-chunked body
916 | 0 | ctx->group_commit = false; |
917 | 0 | return Status::OK(); |
918 | 0 | } |
919 | 0 | if (is_chunk) { |
920 | 0 | ctx->label = ""; |
921 | 0 | } |
922 | |
923 | 0 | auto partial_columns = !req->header(HTTP_PARTIAL_COLUMNS).empty() && |
924 | 0 | iequal(req->header(HTTP_PARTIAL_COLUMNS), "true"); |
925 | 0 | auto temp_partitions = !req->header(HTTP_TEMP_PARTITIONS).empty(); |
926 | 0 | auto partitions = !req->header(HTTP_PARTITIONS).empty(); |
927 | 0 | auto update_mode = |
928 | 0 | !req->header(HTTP_UNIQUE_KEY_UPDATE_MODE).empty() && |
929 | 0 | (iequal(req->header(HTTP_UNIQUE_KEY_UPDATE_MODE), "UPDATE_FIXED_COLUMNS") || |
930 | 0 | iequal(req->header(HTTP_UNIQUE_KEY_UPDATE_MODE), "UPDATE_FLEXIBLE_COLUMNS")); |
931 | 0 | if (!partial_columns && !partitions && !temp_partitions && !ctx->two_phase_commit && |
932 | 0 | !update_mode) { |
933 | 0 | if (!config::wait_internal_group_commit_finish && !ctx->label.empty()) { |
934 | 0 | return Status::InvalidArgument("label and group_commit can't be set at the same time"); |
935 | 0 | } |
936 | 0 | ctx->group_commit = true; |
937 | 0 | if (iequal(group_commit_mode, "async_mode")) { |
938 | 0 | if (!load_size_smaller_than_wal_limit(content_length)) { |
939 | 0 | std::stringstream ss; |
940 | 0 | ss << "There is no space for group commit stream load async WAL. This stream load " |
941 | 0 | "size is " |
942 | 0 | << content_length << ". WAL dir info: " |
943 | 0 | << ExecEnv::GetInstance()->wal_mgr()->get_wal_dirs_info_string(); |
944 | 0 | LOG(WARNING) << ss.str(); |
945 | 0 | return Status::Error<EXCEEDED_LIMIT>(ss.str()); |
946 | 0 | } |
947 | 0 | } |
948 | 0 | } |
949 | 0 | return Status::OK(); |
950 | 0 | } |
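Condensing the branches above: group commit is taken only when a mode is requested (or forced by `wait_internal_group_commit_finish`), the request actually carries data, and no incompatible feature is in play. A simplified restatement as a hypothetical predicate; the WAL-space and label checks that return errors in the real code are omitted:

```cpp
// Returns whether the request should take the group-commit path.
bool should_group_commit(bool mode_requested_or_forced, bool has_body, bool is_chunked,
                         bool partial_update, bool partitions, bool temp_partitions,
                         bool two_phase_commit, bool partial_update_mode) {
    if (!mode_requested_or_forced) return false;  // off_mode or no header
    if (!has_body && !is_chunked) return false;   // nothing to load
    return !partial_update && !partitions && !temp_partitions && !two_phase_commit &&
           !partial_update_mode;
}
```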
951 | | |
952 | | } // namespace doris |