Coverage Report

Created: 2026-04-10 10:11

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/service/http/action/http_stream.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "service/http/action/http_stream.h"
19
20
#include <cstddef>
21
#include <future>
22
#include <sstream>
23
24
// use string iequal
25
#include <event2/buffer.h>
26
#include <event2/bufferevent.h>
27
#include <event2/http.h>
28
#include <gen_cpp/FrontendService.h>
29
#include <gen_cpp/FrontendService_types.h>
30
#include <gen_cpp/HeartbeatService_types.h>
31
#include <rapidjson/prettywriter.h>
32
#include <thrift/protocol/TDebugProtocol.h>
33
34
#include "cloud/config.h"
35
#include "common/config.h"
36
#include "common/logging.h"
37
#include "common/metrics/doris_metrics.h"
38
#include "common/metrics/metrics.h"
39
#include "common/status.h"
40
#include "common/utils.h"
41
#include "io/fs/stream_load_pipe.h"
42
#include "load/group_commit/group_commit_mgr.h"
43
#include "load/load_path_mgr.h"
44
#include "load/stream_load/new_load_stream_mgr.h"
45
#include "load/stream_load/stream_load_context.h"
46
#include "load/stream_load/stream_load_executor.h"
47
#include "load/stream_load/stream_load_recorder.h"
48
#include "runtime/exec_env.h"
49
#include "runtime/fragment_mgr.h"
50
#include "service/http/http_channel.h"
51
#include "service/http/http_common.h"
52
#include "service/http/http_headers.h"
53
#include "service/http/http_request.h"
54
#include "service/http/utils.h"
55
#include "storage/storage_engine.h"
56
#include "util/byte_buffer.h"
57
#include "util/client_cache.h"
58
#include "util/string_util.h"
59
#include "util/thrift_rpc_helper.h"
60
#include "util/time.h"
61
#include "util/uid_util.h"
62
63
namespace doris {
64
using namespace ErrorCode;
65
66
DEFINE_COUNTER_METRIC_PROTOTYPE_2ARG(http_stream_requests_total, MetricUnit::REQUESTS);
67
DEFINE_COUNTER_METRIC_PROTOTYPE_2ARG(http_stream_duration_ms, MetricUnit::MILLISECONDS);
68
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(http_stream_current_processing, MetricUnit::REQUESTS);
69
70
HttpStreamAction::HttpStreamAction(ExecEnv* exec_env)
71
6
        : HttpHandlerWithAuth(exec_env, TPrivilegeHier::GLOBAL, TPrivilegeType::LOAD) {
72
    // Use LOAD privilege type: requires LOAD permission
73
    // Note: _exec_env is set by parent class HttpHandlerWithAuth
74
6
    _http_stream_entity =
75
6
            DorisMetrics::instance()->metric_registry()->register_entity("http_stream");
76
6
    INT_COUNTER_METRIC_REGISTER(_http_stream_entity, http_stream_requests_total);
77
6
    INT_COUNTER_METRIC_REGISTER(_http_stream_entity, http_stream_duration_ms);
78
6
    INT_GAUGE_METRIC_REGISTER(_http_stream_entity, http_stream_current_processing);
79
6
}
80
81
3
HttpStreamAction::~HttpStreamAction() {
82
3
    DorisMetrics::instance()->metric_registry()->deregister_entity(_http_stream_entity);
83
3
}
84
85
132
void HttpStreamAction::handle(HttpRequest* req) {
86
132
    std::shared_ptr<StreamLoadContext> ctx =
87
132
            std::static_pointer_cast<StreamLoadContext>(req->handler_ctx());
88
132
    if (ctx == nullptr) {
89
0
        return;
90
0
    }
91
92
    // status already set to fail
93
132
    if (ctx->status.ok()) {
94
111
        ctx->status = _handle(req, ctx);
95
111
        if (!ctx->status.ok() && !ctx->status.is<PUBLISH_TIMEOUT>()) {
96
3
            LOG(WARNING) << "handle streaming load failed, id=" << ctx->id
97
3
                         << ", errmsg=" << ctx->status;
98
3
        }
99
111
    }
100
132
    ctx->load_cost_millis = UnixMillis() - ctx->start_millis;
101
102
132
    if (!ctx->status.ok() && !ctx->status.is<PUBLISH_TIMEOUT>()) {
103
24
        if (ctx->body_sink != nullptr) {
104
24
            ctx->body_sink->cancel(ctx->status.to_string());
105
24
        }
106
24
    }
107
108
132
    if (!ctx->status.ok()) {
109
24
        auto str = std::string(ctx->to_json());
110
        // add new line at end
111
24
        str = str + '\n';
112
24
        HttpChannel::send_reply(req, str);
113
24
        return;
114
24
    }
115
108
    auto str = std::string(ctx->to_json());
116
    // add new line at end
117
108
    str = str + '\n';
118
108
    HttpChannel::send_reply(req, str);
119
108
    if (config::enable_stream_load_record || config::enable_stream_load_record_to_audit_log_table) {
120
108
        if (req->header(HTTP_SKIP_RECORD_TO_AUDIT_LOG_TABLE).empty()) {
121
108
            str = ctx->prepare_stream_load_record(str);
122
108
            _save_stream_load_record(ctx, str);
123
108
        }
124
108
    }
125
    // update statistics
126
108
    http_stream_requests_total->increment(1);
127
108
    http_stream_duration_ms->increment(ctx->load_cost_millis);
128
108
}
129
130
111
Status HttpStreamAction::_handle(HttpRequest* http_req, std::shared_ptr<StreamLoadContext> ctx) {
131
111
    if (ctx->body_bytes > 0 && ctx->receive_bytes != ctx->body_bytes) {
132
0
        LOG(WARNING) << "recevie body don't equal with body bytes, body_bytes=" << ctx->body_bytes
133
0
                     << ", receive_bytes=" << ctx->receive_bytes << ", id=" << ctx->id;
134
0
        return Status::Error<ErrorCode::NETWORK_ERROR>("receive body don't equal with body bytes");
135
0
    }
136
111
    RETURN_IF_ERROR(ctx->body_sink->finish());
137
138
    // wait stream load finish
139
111
    RETURN_IF_ERROR(ctx->load_status_future.get());
140
141
108
    if (ctx->group_commit) {
142
11
        LOG(INFO) << "skip commit because this is group commit, pipe_id=" << ctx->id.to_string();
143
11
        return Status::OK();
144
11
    }
145
146
97
    if (ctx->two_phase_commit) {
147
1
        int64_t pre_commit_start_time = MonotonicNanos();
148
1
        RETURN_IF_ERROR(_exec_env->stream_load_executor()->pre_commit_txn(ctx.get()));
149
1
        ctx->pre_commit_txn_cost_nanos = MonotonicNanos() - pre_commit_start_time;
150
96
    } else {
151
        // If put file success we need commit this load
152
96
        int64_t commit_and_publish_start_time = MonotonicNanos();
153
96
        RETURN_IF_ERROR(_exec_env->stream_load_executor()->commit_txn(ctx.get()));
154
96
        ctx->commit_and_publish_txn_cost_nanos = MonotonicNanos() - commit_and_publish_start_time;
155
96
    }
156
97
    return Status::OK();
157
97
}
158
159
132
int HttpStreamAction::on_header(HttpRequest* req) {
160
    // Call parent's auth check first
161
132
    int ret = HttpHandlerWithAuth::on_header(req);
162
132
    if (ret != 0) {
163
0
        return ret; // Auth failed, return error
164
0
    }
165
166
132
    http_stream_current_processing->increment(1);
167
168
132
    std::shared_ptr<StreamLoadContext> ctx = std::make_shared<StreamLoadContext>(_exec_env);
169
132
    req->set_handler_ctx(ctx);
170
171
132
    ctx->load_type = TLoadType::MANUL_LOAD;
172
132
    ctx->load_src_type = TLoadSourceType::RAW;
173
132
    ctx->two_phase_commit = req->header(HTTP_TWO_PHASE_COMMIT) == "true";
174
132
    Status st = _handle_group_commit(req, ctx);
175
176
132
    LOG(INFO) << "new income streaming load request." << ctx->brief()
177
132
              << " sql : " << req->header(HTTP_SQL) << ", group_commit=" << ctx->group_commit;
178
132
    if (st.ok()) {
179
132
        st = _on_header(req, ctx);
180
132
    }
181
132
    if (!st.ok()) {
182
0
        ctx->status = std::move(st);
183
0
        if (ctx->body_sink != nullptr) {
184
0
            ctx->body_sink->cancel(ctx->status.to_string());
185
0
        }
186
0
        auto str = ctx->to_json();
187
        // add new line at end
188
0
        str = str + '\n';
189
0
        HttpChannel::send_reply(req, str);
190
0
        if (config::enable_stream_load_record ||
191
0
            config::enable_stream_load_record_to_audit_log_table) {
192
0
            if (req->header(HTTP_SKIP_RECORD_TO_AUDIT_LOG_TABLE).empty()) {
193
0
                str = ctx->prepare_stream_load_record(str);
194
0
                _save_stream_load_record(ctx, str);
195
0
            }
196
0
        }
197
0
        return -1;
198
0
    }
199
132
    return 0;
200
132
}
201
202
132
Status HttpStreamAction::_on_header(HttpRequest* http_req, std::shared_ptr<StreamLoadContext> ctx) {
203
    // auth information
204
132
    if (!parse_basic_auth(*http_req, &ctx->auth)) {
205
0
        LOG(WARNING) << "parse basic authorization failed." << ctx->brief();
206
0
        return Status::NotAuthorized("no valid Basic authorization");
207
0
    }
208
209
    // TODO(zs) : need to request an FE to obtain information such as format
210
    // check content length
211
132
    ctx->body_bytes = 0;
212
132
    size_t csv_max_body_bytes = config::streaming_load_max_mb * 1024 * 1024;
213
132
    if (!http_req->header(HttpHeaders::CONTENT_LENGTH).empty()) {
214
129
        try {
215
129
            ctx->body_bytes = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH));
216
129
        } catch (const std::exception& e) {
217
0
            return Status::InvalidArgument("invalid HTTP header CONTENT_LENGTH={}: {}",
218
0
                                           http_req->header(HttpHeaders::CONTENT_LENGTH), e.what());
219
0
        }
220
        // csv max body size
221
129
        if (ctx->body_bytes > csv_max_body_bytes) {
222
0
            LOG(WARNING) << "body exceed max size." << ctx->brief();
223
0
            return Status::Error<ErrorCode::EXCEEDED_LIMIT>(
224
0
                    "body size {} exceed BE's conf `streaming_load_max_mb` {}. increase it if you "
225
0
                    "are sure this load is reasonable",
226
0
                    ctx->body_bytes, csv_max_body_bytes);
227
0
        }
228
129
    }
229
230
132
    auto pipe = std::make_shared<io::StreamLoadPipe>(
231
132
            io::kMaxPipeBufferedBytes /* max_buffered_bytes */, 64 * 1024 /* min_chunk_size */,
232
132
            ctx->body_bytes /* total_length */);
233
132
    ctx->body_sink = pipe;
234
132
    ctx->pipe = pipe;
235
236
132
    RETURN_IF_ERROR(_exec_env->new_load_stream_mgr()->put(ctx->id, ctx));
237
238
    // Here, transactions are set from fe's NativeInsertStmt.
239
    // TODO(zs) : How to support two_phase_commit
240
241
132
    return Status::OK();
242
132
}
243
244
9.66k
void HttpStreamAction::on_chunk_data(HttpRequest* req) {
245
9.66k
    std::shared_ptr<StreamLoadContext> ctx =
246
9.66k
            std::static_pointer_cast<StreamLoadContext>(req->handler_ctx());
247
9.66k
    if (ctx == nullptr || !ctx->status.ok()) {
248
8
        return;
249
8
    }
250
9.65k
    if (!req->header(HTTP_WAL_ID_KY).empty()) {
251
0
        ctx->wal_id = std::stoll(req->header(HTTP_WAL_ID_KY));
252
0
    }
253
9.65k
    struct evhttp_request* ev_req = req->get_evhttp_request();
254
9.65k
    auto evbuf = evhttp_request_get_input_buffer(ev_req);
255
256
    // In HttpStreamAction::on_chunk_data
257
    //      -> process_put
258
    //      -> StreamLoadExecutor::execute_plan_fragment
259
    //      -> exec_plan_fragment
260
    // , SCOPED_ATTACH_TASK will be called.
261
9.65k
    SCOPED_SWITCH_THREAD_MEM_TRACKER_LIMITER(ExecEnv::GetInstance()->stream_load_pipe_tracker());
262
263
9.65k
    int64_t start_read_data_time = MonotonicNanos();
264
9.65k
    Status st = ctx->allocate_schema_buffer();
265
9.65k
    if (!st.ok()) {
266
0
        ctx->status = st;
267
0
        return;
268
0
    }
269
19.3k
    while (evbuffer_get_length(evbuf) > 0) {
270
9.65k
        ByteBufferPtr bb;
271
9.65k
        st = ByteBuffer::allocate(128 * 1024, &bb);
272
9.65k
        if (!st.ok()) {
273
0
            ctx->status = st;
274
0
            return;
275
0
        }
276
9.65k
        auto remove_bytes = evbuffer_remove(evbuf, bb->ptr, bb->capacity);
277
9.65k
        bb->pos = remove_bytes;
278
9.65k
        bb->flip();
279
9.65k
        st = ctx->body_sink->append(bb);
280
        // schema_buffer stores 1M of data for parsing column information
281
        // need to determine whether to cache for the first time
282
9.65k
        if (ctx->is_read_schema) {
283
132
            if (ctx->schema_buffer()->pos + remove_bytes < config::stream_tvf_buffer_size) {
284
132
                ctx->schema_buffer()->put_bytes(bb->ptr, remove_bytes);
285
132
            } else {
286
0
                LOG(INFO) << "use a portion of data to request fe to obtain column information";
287
0
                ctx->is_read_schema = false;
288
0
                ctx->status = process_put(req, ctx);
289
0
            }
290
132
        }
291
9.65k
        if (!st.ok()) {
292
0
            LOG(WARNING) << "append body content failed. errmsg=" << st << ", " << ctx->brief();
293
0
            ctx->status = st;
294
0
            return;
295
0
        }
296
9.65k
        ctx->receive_bytes += remove_bytes;
297
9.65k
    }
298
    // after all the data has been read and it has not reached 1M, it will execute here
299
9.65k
    if (ctx->is_read_schema) {
300
132
        LOG(INFO) << "after all the data has been read and it has not reached 1M, it will execute "
301
132
                  << "here";
302
132
        ctx->is_read_schema = false;
303
132
        ctx->status = process_put(req, ctx);
304
132
    }
305
9.65k
    ctx->read_data_cost_nanos += (MonotonicNanos() - start_read_data_time);
306
9.65k
}
307
308
132
void HttpStreamAction::free_handler_ctx(std::shared_ptr<void> param) {
309
132
    std::shared_ptr<StreamLoadContext> ctx = std::static_pointer_cast<StreamLoadContext>(param);
310
132
    if (ctx == nullptr) {
311
0
        return;
312
0
    }
313
    // sender is gone, make receiver know it
314
132
    if (ctx->body_sink != nullptr) {
315
132
        ctx->body_sink->cancel("sender is gone");
316
132
    }
317
    // remove stream load context from stream load manager and the resource will be released
318
132
    ctx->exec_env()->new_load_stream_mgr()->remove(ctx->id);
319
132
    http_stream_current_processing->increment(-1);
320
132
}
321
322
Status HttpStreamAction::process_put(HttpRequest* http_req,
323
132
                                     std::shared_ptr<StreamLoadContext> ctx) {
324
132
    TStreamLoadPutRequest request;
325
132
    if (http_req != nullptr) {
326
132
        request.__set_load_sql(http_req->header(HTTP_SQL));
327
132
        if (!http_req->header(HTTP_MEMTABLE_ON_SINKNODE).empty()) {
328
0
            bool value = iequal(http_req->header(HTTP_MEMTABLE_ON_SINKNODE), "true");
329
0
            request.__set_memtable_on_sink_node(value);
330
0
        }
331
132
    } else {
332
0
        request.__set_token(ctx->auth.token);
333
0
        request.__set_load_sql(ctx->sql_str);
334
0
        ctx->auth.token = "";
335
0
    }
336
132
    set_request_auth(&request, ctx->auth);
337
132
    request.__set_loadId(ctx->id.to_thrift());
338
132
    request.__set_label(ctx->label);
339
132
    if (ctx->group_commit) {
340
12
        if (!http_req->header(HTTP_GROUP_COMMIT).empty()) {
341
12
            request.__set_group_commit_mode(http_req->header(HTTP_GROUP_COMMIT));
342
12
        } else {
343
            // used for wait_internal_group_commit_finish
344
0
            request.__set_group_commit_mode("sync_mode");
345
0
        }
346
12
    }
347
132
    if (_exec_env->cluster_info()->backend_id != 0) {
348
132
        request.__set_backend_id(_exec_env->cluster_info()->backend_id);
349
132
    } else {
350
0
        LOG(WARNING) << "_exec_env->cluster_info not set backend_id";
351
0
    }
352
132
    if (ctx->wal_id > 0) {
353
0
        request.__set_partial_update(false);
354
0
    }
355
356
    // plan this load
357
132
    TNetworkAddress master_addr = _exec_env->cluster_info()->master_fe_addr;
358
132
    int64_t stream_load_put_start_time = MonotonicNanos();
359
132
    RETURN_IF_ERROR(ThriftRpcHelper::rpc<FrontendServiceClient>(
360
132
            master_addr.hostname, master_addr.port,
361
132
            [&request, ctx](FrontendServiceConnection& client) {
362
132
                client->streamLoadPut(ctx->put_result, request);
363
132
            }));
364
132
    ctx->put_result.pipeline_params.query_options.__set_enable_strict_cast(false);
365
132
    ctx->stream_load_put_cost_nanos = MonotonicNanos() - stream_load_put_start_time;
366
132
    Status plan_status(Status::create(ctx->put_result.status));
367
132
    if (!plan_status.ok()) {
368
20
        LOG(WARNING) << "plan streaming load failed. errmsg=" << plan_status << ctx->brief();
369
20
        return plan_status;
370
20
    }
371
112
    if (config::is_cloud_mode() && ctx->two_phase_commit && ctx->is_mow_table()) {
372
1
        return Status::NotSupported("http stream 2pc is unsupported for mow table");
373
1
    }
374
111
    ctx->db = ctx->put_result.pipeline_params.db_name;
375
111
    ctx->table = ctx->put_result.pipeline_params.table_name;
376
111
    ctx->txn_id = ctx->put_result.pipeline_params.txn_conf.txn_id;
377
111
    ctx->label = ctx->put_result.pipeline_params.import_label;
378
111
    ctx->put_result.pipeline_params.__set_wal_id(ctx->wal_id);
379
111
    if (http_req != nullptr && http_req->header(HTTP_GROUP_COMMIT) == "async_mode") {
380
        // FIXME find a way to avoid chunked stream load write large WALs
381
11
        size_t content_length = 0;
382
11
        if (!http_req->header(HttpHeaders::CONTENT_LENGTH).empty()) {
383
9
            try {
384
9
                content_length = std::stol(http_req->header(HttpHeaders::CONTENT_LENGTH));
385
9
            } catch (const std::exception& e) {
386
0
                return Status::InvalidArgument("invalid HTTP header CONTENT_LENGTH={}: {}",
387
0
                                               http_req->header(HttpHeaders::CONTENT_LENGTH),
388
0
                                               e.what());
389
0
            }
390
9
            if (ctx->format == TFileFormatType::FORMAT_CSV_GZ ||
391
9
                ctx->format == TFileFormatType::FORMAT_CSV_LZO ||
392
9
                ctx->format == TFileFormatType::FORMAT_CSV_BZ2 ||
393
9
                ctx->format == TFileFormatType::FORMAT_CSV_LZ4FRAME ||
394
9
                ctx->format == TFileFormatType::FORMAT_CSV_LZOP ||
395
9
                ctx->format == TFileFormatType::FORMAT_CSV_LZ4BLOCK ||
396
9
                ctx->format == TFileFormatType::FORMAT_CSV_SNAPPYBLOCK) {
397
0
                content_length *= 3;
398
0
            }
399
9
        }
400
11
        ctx->put_result.pipeline_params.__set_content_length(content_length);
401
11
    }
402
111
    TPipelineFragmentParamsList mocked;
403
111
    return _exec_env->stream_load_executor()->execute_plan_fragment(ctx, mocked);
404
111
}
405
406
void HttpStreamAction::_save_stream_load_record(std::shared_ptr<StreamLoadContext> ctx,
407
108
                                                const std::string& str) {
408
108
    std::shared_ptr<StreamLoadRecorder> stream_load_recorder =
409
108
            ExecEnv::GetInstance()->storage_engine().get_stream_load_recorder();
410
411
108
    if (stream_load_recorder != nullptr) {
412
108
        std::string key =
413
108
                std::to_string(ctx->start_millis + ctx->load_cost_millis) + "_" + ctx->label;
414
108
        auto st = stream_load_recorder->put(key, str);
415
108
        if (st.ok()) {
416
108
            LOG(INFO) << "put stream_load_record rocksdb successfully. label: " << ctx->label
417
108
                      << ", key: " << key;
418
108
        }
419
108
    } else {
420
0
        LOG(WARNING) << "put stream_load_record rocksdb failed. stream_load_recorder is null.";
421
0
    }
422
108
}
423
424
Status HttpStreamAction::_handle_group_commit(HttpRequest* req,
425
132
                                              std::shared_ptr<StreamLoadContext> ctx) {
426
132
    std::string group_commit_mode = req->header(HTTP_GROUP_COMMIT);
427
132
    if (!group_commit_mode.empty() && !iequal(group_commit_mode, "sync_mode") &&
428
132
        !iequal(group_commit_mode, "async_mode") && !iequal(group_commit_mode, "off_mode")) {
429
0
        return Status::InvalidArgument(
430
0
                "group_commit can only be [async_mode, sync_mode, off_mode]");
431
0
    }
432
132
    if (config::wait_internal_group_commit_finish) {
433
0
        group_commit_mode = "sync_mode";
434
0
    }
435
132
    int64_t content_length = req->header(HttpHeaders::CONTENT_LENGTH).empty()
436
132
                                     ? 0
437
132
                                     : std::stoll(req->header(HttpHeaders::CONTENT_LENGTH));
438
132
    if (content_length < 0) {
439
0
        std::stringstream ss;
440
0
        ss << "This http load content length <0 (" << content_length
441
0
           << "), please check your content length.";
442
0
        LOG(WARNING) << ss.str();
443
0
        return Status::InvalidArgument(ss.str());
444
0
    }
445
    // allow chunked stream load in flink
446
132
    auto is_chunk =
447
132
            !req->header(HttpHeaders::TRANSFER_ENCODING).empty() &&
448
132
            req->header(HttpHeaders::TRANSFER_ENCODING).find("chunked") != std::string::npos;
449
132
    if (group_commit_mode.empty() || iequal(group_commit_mode, "off_mode") ||
450
132
        (content_length == 0 && !is_chunk)) {
451
        // off_mode and empty
452
120
        ctx->group_commit = false;
453
120
        return Status::OK();
454
120
    }
455
12
    if (is_chunk) {
456
2
        ctx->label = "";
457
2
    }
458
459
12
    auto partial_columns = !req->header(HTTP_PARTIAL_COLUMNS).empty() &&
460
12
                           iequal(req->header(HTTP_PARTIAL_COLUMNS), "true");
461
12
    auto temp_partitions = !req->header(HTTP_TEMP_PARTITIONS).empty();
462
12
    auto partitions = !req->header(HTTP_PARTITIONS).empty();
463
12
    if (!partial_columns && !partitions && !temp_partitions && !ctx->two_phase_commit) {
464
12
        if (!config::wait_internal_group_commit_finish && !ctx->label.empty()) {
465
0
            return Status::InvalidArgument("label and group_commit can't be set at the same time");
466
0
        }
467
12
        ctx->group_commit = true;
468
12
        if (iequal(group_commit_mode, "async_mode")) {
469
12
            if (!load_size_smaller_than_wal_limit(content_length)) {
470
0
                std::stringstream ss;
471
0
                ss << "There is no space for group commit http load async WAL. This http load "
472
0
                      "size is "
473
0
                   << content_length << ". WAL dir info: "
474
0
                   << ExecEnv::GetInstance()->wal_mgr()->get_wal_dirs_info_string();
475
0
                LOG(WARNING) << ss.str();
476
0
                return Status::Error<EXCEEDED_LIMIT>(ss.str());
477
0
            }
478
12
        }
479
12
    }
480
12
    return Status::OK();
481
12
}
482
483
} // namespace doris