Coverage Report

Created: 2025-04-14 12:46

/root/doris/be/src/io/hdfs_builder.cpp
Line  Count  Source
   1         // Licensed to the Apache Software Foundation (ASF) under one
   2         // or more contributor license agreements.  See the NOTICE file
   3         // distributed with this work for additional information
   4         // regarding copyright ownership.  The ASF licenses this file
   5         // to you under the Apache License, Version 2.0 (the
   6         // "License"); you may not use this file except in compliance
   7         // with the License.  You may obtain a copy of the License at
   8         //
   9         //   http://www.apache.org/licenses/LICENSE-2.0
  10         //
  11         // Unless required by applicable law or agreed to in writing,
  12         // software distributed under the License is distributed on an
  13         // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
  14         // KIND, either express or implied.  See the License for the
  15         // specific language governing permissions and limitations
  16         // under the License.
  17
  18         #include "io/hdfs_builder.h"
  19
  20         #include <fmt/format.h>
  21         #include <gen_cpp/PlanNodes_types.h>
  22
  23         #include <cstdlib>
  24         #include <fstream>
  25         #include <utility>
  26         #include <vector>
  27
  28         #include "agent/utils.h"
  29         #include "common/config.h"
  30         #include "common/logging.h"
  31         #include "io/fs/hdfs.h"
  32         #include "util/string_util.h"
  33         #include "util/uid_util.h"
  34
  35         namespace doris {
  36
  37         #ifdef USE_HADOOP_HDFS
  38      0  void err_log_message(const char* fmt, ...) {
  39      0      va_list args;
  40      0      va_start(args, fmt);
  41
  42             // First, call vsnprintf to get the required buffer size
  43      0      int size = vsnprintf(nullptr, 0, fmt, args) + 1; // +1 for '\0'
  44      0      if (size <= 0) {
  45      0          LOG(ERROR) << "Error formatting log message, invalid size";
  46      0          va_end(args);
  47      0          return;
  48      0      }
  49
  50      0      va_end(args);
  51      0      va_start(args, fmt); // Reinitialize va_list
  52
  53             // Allocate a buffer and format the string into it
  54      0      std::vector<char> buffer(size);
  55      0      vsnprintf(buffer.data(), size, fmt, args);
  56
  57      0      va_end(args);
  58
  59             // Use glog to log the message
  60      0      LOG(ERROR) << buffer.data();
  61      0  }
  62
  63      0  void va_err_log_message(const char* fmt, va_list ap) {
  64      0      va_list args_copy;
  65      0      va_copy(args_copy, ap);
  66
  67             // Call vsnprintf to get the required buffer size
  68      0      int size = vsnprintf(nullptr, 0, fmt, args_copy) + 1; // +1 for '\0'
  69      0      va_end(args_copy);                                    // Release the copied va_list
  70
  71      0      if (size <= 0) {
  72      0          LOG(ERROR) << "Error formatting log message, invalid size";
  73      0          return;
  74      0      }
  75
  76             // Reinitialize va_list for the second vsnprintf call
  77      0      va_copy(args_copy, ap);
  78
  79             // Allocate a buffer and format the string into it
  80      0      std::vector<char> buffer(size);
  81      0      vsnprintf(buffer.data(), size, fmt, args_copy);
  82
  83      0      va_end(args_copy);
  84
  85             // Use glog to log the message
  86      0      LOG(ERROR) << buffer.data();
  87      0  }
  88
  89         struct hdfsLogger logger = {.errLogMessage = err_log_message,
  90                                     .vaErrLogMessage = va_err_log_message};
  91         #endif // #ifdef USE_HADOOP_HDFS
  92
  93      0  Status HDFSCommonBuilder::init_hdfs_builder() {
  94      0  #ifdef USE_HADOOP_HDFS
  95      0      static std::once_flag flag;
  96      0      std::call_once(flag, []() { hdfsSetLogger(&logger); });
  97      0  #endif // #ifdef USE_HADOOP_HDFS
  98
  99      0      hdfs_builder = hdfsNewBuilder();
 100      0      if (hdfs_builder == nullptr) {
 101      0          LOG(INFO) << "failed to init HDFSCommonBuilder, please check check be/conf/hdfs-site.xml";
 102      0          return Status::InternalError(
 103      0                  "failed to init HDFSCommonBuilder, please check check be/conf/hdfs-site.xml");
 104      0      }
 105      0      hdfsBuilderSetForceNewInstance(hdfs_builder);
 106      0      return Status::OK();
 107      0  }
 108
 109      0  void HDFSCommonBuilder::set_hdfs_conf(const std::string& key, const std::string& val) {
 110      0      hdfs_conf[key] = val;
 111      0  }
 112
 113      0  void HDFSCommonBuilder::set_hdfs_conf_to_hdfs_builder() {
 114      0      for (const auto& pair : hdfs_conf) {
 115      0          hdfsBuilderConfSetStr(hdfs_builder, pair.first.c_str(), pair.second.c_str());
 116      0      }
 117      0  }
 118
 119      0  Status HDFSCommonBuilder::check_krb_params() {
 120      0      std::string ticket_path = doris::config::kerberos_ccache_path;
 121      0      if (!ticket_path.empty()) {
 122      0          hdfsBuilderConfSetStr(hdfs_builder, "hadoop.security.kerberos.ticket.cache.path",
 123      0                                ticket_path.c_str());
 124      0          return Status::OK();
 125      0      }
 126             // we should check hdfs_kerberos_principal and hdfs_kerberos_keytab nonnull to login kdc.
 127      0      if (hdfs_kerberos_principal.empty() || hdfs_kerberos_keytab.empty()) {
 128      0          return Status::InvalidArgument("Invalid hdfs_kerberos_principal or hdfs_kerberos_keytab");
 129      0      }
 130             // enable auto-renew thread
 131      0      hdfsBuilderConfSetStr(hdfs_builder, "hadoop.kerberos.keytab.login.autorenewal.enabled", "true");
 132      0      return Status::OK();
 133      0  }
 134
 135      0  THdfsParams parse_properties(const std::map<std::string, std::string>& properties) {
 136      0      StringCaseMap<std::string> prop(properties.begin(), properties.end());
 137      0      std::vector<THdfsConf> hdfs_configs;
 138      0      THdfsParams hdfsParams;
 139      0      for (auto iter = prop.begin(); iter != prop.end();) {
 140      0          if (iter->first.compare(FS_KEY) == 0) {
 141      0              hdfsParams.__set_fs_name(iter->second);
 142      0              iter = prop.erase(iter);
 143      0          } else if (iter->first.compare(USER) == 0) {
 144      0              hdfsParams.__set_user(iter->second);
 145      0              iter = prop.erase(iter);
 146      0          } else if (iter->first.compare(KERBEROS_PRINCIPAL) == 0) {
 147      0              hdfsParams.__set_hdfs_kerberos_principal(iter->second);
 148      0              iter = prop.erase(iter);
 149      0          } else if (iter->first.compare(KERBEROS_KEYTAB) == 0) {
 150      0              hdfsParams.__set_hdfs_kerberos_keytab(iter->second);
 151      0              iter = prop.erase(iter);
 152      0          } else {
 153      0              THdfsConf item;
 154      0              item.key = iter->first;
 155      0              item.value = iter->second;
 156      0              hdfs_configs.push_back(item);
 157      0              iter = prop.erase(iter);
 158      0          }
 159      0      }
 160      0      if (!hdfsParams.__isset.user && std::getenv("HADOOP_USER_NAME") != nullptr) {
 161      0          hdfsParams.__set_user(std::getenv("HADOOP_USER_NAME"));
 162      0      }
 163      0      hdfsParams.__set_hdfs_conf(hdfs_configs);
 164      0      return hdfsParams;
 165      0  }
 166
 167         Status create_hdfs_builder(const THdfsParams& hdfsParams, const std::string& fs_name,
 168      0                             HDFSCommonBuilder* builder) {
 169      0      RETURN_IF_ERROR(builder->init_hdfs_builder());
 170      0      builder->fs_name = fs_name;
 171      0      hdfsBuilderSetNameNode(builder->get(), builder->fs_name.c_str());
 172             // set kerberos conf
 173      0      if (hdfsParams.__isset.hdfs_kerberos_keytab) {
 174      0          builder->kerberos_login = true;
 175      0          builder->hdfs_kerberos_keytab = hdfsParams.hdfs_kerberos_keytab;
 176      0  #ifdef USE_HADOOP_HDFS
 177      0          hdfsBuilderSetKerb5Conf(builder->get(), doris::config::kerberos_krb5_conf_path.c_str());
 178      0          hdfsBuilderSetKeyTabFile(builder->get(), builder->hdfs_kerberos_keytab.c_str());
 179      0  #endif
 180      0      }
 181      0      if (hdfsParams.__isset.hdfs_kerberos_principal) {
 182      0          builder->kerberos_login = true;
 183      0          builder->hdfs_kerberos_principal = hdfsParams.hdfs_kerberos_principal;
 184      0          hdfsBuilderSetPrincipal(builder->get(), builder->hdfs_kerberos_principal.c_str());
 185      0      } else if (hdfsParams.__isset.user) {
 186      0          builder->hadoop_user = hdfsParams.user;
 187      0          hdfsBuilderSetUserName(builder->get(), builder->hadoop_user.c_str());
 188      0  #ifdef USE_HADOOP_HDFS
 189      0          hdfsBuilderSetKerb5Conf(builder->get(), nullptr);
 190      0          hdfsBuilderSetKeyTabFile(builder->get(), nullptr);
 191      0  #endif
 192      0      }
 193             // set other conf
 194      0      if (hdfsParams.__isset.hdfs_conf) {
 195      0          for (const THdfsConf& conf : hdfsParams.hdfs_conf) {
 196      0              builder->set_hdfs_conf(conf.key, conf.value);
 197      0              LOG(INFO) << "set hdfs config: " << conf.key << ", value: " << conf.value;
 198      0  #ifdef USE_HADOOP_HDFS
 199                         // Set krb5.conf, we should define java.security.krb5.conf in catalog properties
 200      0              if (strcmp(conf.key.c_str(), "java.security.krb5.conf") == 0) {
 201      0                  builder->krb5_conf_path = conf.value;
 202      0                  hdfsBuilderSetKerb5Conf(builder->get(), builder->krb5_conf_path.c_str());
 203      0              }
 204      0  #endif
 205      0          }
 206      0          builder->set_hdfs_conf_to_hdfs_builder();
 207      0      }
 208      0      if (builder->is_kerberos()) {
 209      0          RETURN_IF_ERROR(builder->check_krb_params());
 210      0      }
 211      0      hdfsBuilderConfSetStr(builder->get(), "ipc.client.fallback-to-simple-auth-allowed", "true");
 212      0      return Status::OK();
 213      0  }
 214
 215         Status create_hdfs_builder(const std::map<std::string, std::string>& properties,
 216      0                             HDFSCommonBuilder* builder) {
 217      0      THdfsParams hdfsParams = parse_properties(properties);
 218      0      return create_hdfs_builder(hdfsParams, hdfsParams.fs_name, builder);
 219      0  }
 220
 221         } // namespace doris
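
Every instrumented line in this file reports a hit count of 0, so neither parse_properties() nor either create_hdfs_builder() overload is exercised by the current test run. Below is a minimal sketch of a unit test that would drive those paths and, through them, init_hdfs_builder(), set_hdfs_conf(), and set_hdfs_conf_to_hdfs_builder(). It assumes the usual Doris BE gtest setup; the test name and the literal property keys ("fs.defaultFS", "hadoop.username") are assumptions inferred from the FS_KEY and USER branches in the listing, not taken from this report.

// Hypothetical coverage-oriented test sketch; not part of the measured source.
#include <gtest/gtest.h>

#include <map>
#include <string>

#include "io/hdfs_builder.h"

namespace doris {

TEST(HdfsBuilderTest, ParsePropertiesAndBuild) {
    // Keys chosen to hit the FS_KEY, USER, and fallback (hdfs_conf) branches
    // of parse_properties(); the key strings are assumed values of those constants.
    std::map<std::string, std::string> props = {
            {"fs.defaultFS", "hdfs://nameservice1"},
            {"hadoop.username", "hadoop"},
            {"dfs.nameservices", "nameservice1"}, // falls through into hdfs_conf
    };

    THdfsParams params = parse_properties(props);
    EXPECT_EQ(params.fs_name, "hdfs://nameservice1");
    EXPECT_EQ(params.user, "hadoop");
    EXPECT_EQ(params.hdfs_conf.size(), 1U);

    // Driving create_hdfs_builder() also covers init_hdfs_builder(),
    // set_hdfs_conf(), and set_hdfs_conf_to_hdfs_builder().
    HDFSCommonBuilder builder;
    Status st = create_hdfs_builder(params, params.fs_name, &builder);
    EXPECT_TRUE(st.ok());
}

} // namespace doris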