Coverage Report

Created: 2026-04-11 14:25

be/src/io/fs/buffered_reader.cpp
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#include "io/fs/buffered_reader.h"
19
20
#include <bvar/reducer.h>
21
#include <bvar/window.h>
22
#include <string.h>
23
24
#include <algorithm>
25
#include <chrono>
26
#include <cstdint>
27
#include <memory>
28
29
#include "common/cast_set.h"
30
#include "common/compiler_util.h" // IWYU pragma: keep
31
#include "common/config.h"
32
#include "common/status.h"
33
#include "core/custom_allocator.h"
34
#include "runtime/exec_env.h"
35
#include "runtime/runtime_profile.h"
36
#include "runtime/thread_context.h"
37
#include "runtime/workload_management/io_throttle.h"
38
#include "util/slice.h"
39
#include "util/threadpool.h"
40
namespace doris {
41
42
namespace io {
43
struct IOContext;
44
45
// Add a bvar to capture the bytes downloaded per second by the buffered reader
46
bvar::Adder<uint64_t> g_bytes_downloaded("buffered_reader", "bytes_downloaded");
47
bvar::PerSecond<bvar::Adder<uint64_t>> g_bytes_downloaded_per_second("buffered_reader",
48
                                                                     "bytes_downloaded_per_second",
49
                                                                     &g_bytes_downloaded, 60);
50
51
Status MergeRangeFileReader::read_at_impl(size_t offset, Slice result, size_t* bytes_read,
52
450k
                                          const IOContext* io_ctx) {
53
450k
    _statistics.request_io++;
54
450k
    *bytes_read = 0;
55
450k
    if (result.size == 0) {
56
0
        return Status::OK();
57
0
    }
58
450k
    const int range_index = _search_read_range(offset, offset + result.size);
59
450k
    if (range_index < 0) {
60
0
        SCOPED_RAW_TIMER(&_statistics.read_time);
61
0
        Status st = _reader->read_at(offset, result, bytes_read, io_ctx);
62
0
        _statistics.merged_io++;
63
0
        _statistics.request_bytes += *bytes_read;
64
0
        _statistics.merged_bytes += *bytes_read;
65
0
        return st;
66
0
    }
67
450k
    if (offset + result.size > _random_access_ranges[range_index].end_offset) {
68
        // return _reader->read_at(offset, result, bytes_read, io_ctx);
69
0
        return Status::IOError("Range in RandomAccessReader should be read sequentially");
70
0
    }
71
72
450k
    size_t has_read = 0;
73
450k
    RangeCachedData& cached_data = _range_cached_data[range_index];
74
450k
    cached_data.has_read = true;
75
450k
    if (cached_data.contains(offset)) {
76
        // has cached data in box
77
445k
        _read_in_box(cached_data, offset, result, &has_read);
78
445k
        _statistics.request_bytes += has_read;
79
445k
        if (has_read == result.size) {
80
            // all data is read in cache
81
445k
            *bytes_read = has_read;
82
445k
            return Status::OK();
83
445k
        }
84
445k
    } else if (!cached_data.empty()) {
85
        // the data in range may be skipped or ignored
86
6
        for (int16_t box_index : cached_data.ref_box) {
87
6
            _dec_box_ref(box_index);
88
6
        }
89
6
        cached_data.reset();
90
6
    }
91
92
4.76k
    size_t to_read = result.size - has_read;
93
4.77k
    if (to_read >= SMALL_IO || to_read >= _remaining) {
94
0
        SCOPED_RAW_TIMER(&_statistics.read_time);
95
0
        size_t read_size = 0;
96
0
        RETURN_IF_ERROR(_reader->read_at(offset + has_read, Slice(result.data + has_read, to_read),
97
0
                                         &read_size, io_ctx));
98
0
        *bytes_read = has_read + read_size;
99
0
        _statistics.merged_io++;
100
0
        _statistics.request_bytes += read_size;
101
0
        _statistics.merged_bytes += read_size;
102
0
        return Status::OK();
103
0
    }
104
105
    // merge small IO
106
4.76k
    size_t merge_start = offset + has_read;
107
4.76k
    const size_t merge_end = merge_start + _merged_read_slice_size;
108
    // <slice_size, is_content>
109
4.76k
    std::vector<std::pair<size_t, bool>> merged_slice;
110
4.76k
    size_t content_size = 0;
111
4.76k
    size_t hollow_size = 0;
112
4.76k
    if (merge_start > _random_access_ranges[range_index].end_offset) {
113
0
        return Status::IOError("Fail to merge small IO");
114
0
    }
115
4.76k
    int merge_index = range_index;
116
47.5k
    while (merge_start < merge_end && merge_index < _random_access_ranges.size()) {
117
43.0k
        size_t content_max = _remaining - content_size;
118
43.0k
        if (content_max == 0) {
119
0
            break;
120
0
        }
121
43.0k
        if (merge_index != range_index && _range_cached_data[merge_index].has_read) {
122
            // don't read or merge twice
123
0
            break;
124
0
        }
125
43.0k
        if (_random_access_ranges[merge_index].end_offset > merge_end) {
126
103
            size_t add_content = std::min(merge_end - merge_start, content_max);
127
103
            content_size += add_content;
128
103
            merge_start += add_content;
129
103
            merged_slice.emplace_back(add_content, true);
130
103
            break;
131
103
        }
132
42.9k
        size_t add_content =
133
42.9k
                std::min(_random_access_ranges[merge_index].end_offset - merge_start, content_max);
134
42.9k
        content_size += add_content;
135
42.9k
        merge_start += add_content;
136
42.9k
        merged_slice.emplace_back(add_content, true);
137
42.9k
        if (merge_start != _random_access_ranges[merge_index].end_offset) {
138
0
            break;
139
0
        }
140
42.9k
        if (merge_index < _random_access_ranges.size() - 1 && merge_start < merge_end) {
141
38.5k
            size_t gap = _random_access_ranges[merge_index + 1].start_offset -
142
38.5k
                         _random_access_ranges[merge_index].end_offset;
143
38.5k
            if ((content_size + hollow_size) > SMALL_IO && gap >= SMALL_IO) {
144
                // too large gap
145
0
                break;
146
0
            }
147
38.5k
            if (gap < merge_end - merge_start && content_size < _remaining &&
148
38.5k
                !_range_cached_data[merge_index + 1].has_read) {
149
38.3k
                hollow_size += gap;
150
38.3k
                merge_start = _random_access_ranges[merge_index + 1].start_offset;
151
38.3k
                merged_slice.emplace_back(gap, false);
152
38.3k
            } else {
153
                // there's no enough memory to read hollow data
154
236
                break;
155
236
            }
156
38.5k
        }
157
42.7k
        merge_index++;
158
42.7k
    }
159
4.76k
    content_size = 0;
160
4.76k
    hollow_size = 0;
161
4.76k
    std::vector<std::pair<double, size_t>> ratio_and_size;
162
    // Calculate the read-amplification ratio for each merge operation and the size of the merged data.
163
    // Find the largest merged-data size whose amplification ratio is below config::max_amplified_read_ratio (a simplified sketch follows this function).
164
81.4k
    for (const std::pair<size_t, bool>& slice : merged_slice) {
165
81.4k
        if (slice.second) {
166
43.0k
            content_size += slice.first;
167
43.0k
            if (slice.first > 0) {
168
43.0k
                ratio_and_size.emplace_back((double)hollow_size / (double)content_size,
169
43.0k
                                            content_size + hollow_size);
170
43.0k
            }
171
43.0k
        } else {
172
38.3k
            hollow_size += slice.first;
173
38.3k
        }
174
81.4k
    }
175
4.76k
    size_t best_merged_size = 0;
176
47.8k
    for (int i = 0; i < ratio_and_size.size(); ++i) {
177
43.0k
        const std::pair<double, size_t>& rs = ratio_and_size[i];
178
43.0k
        size_t equivalent_size = rs.second / (i + 1);
179
43.0k
        if (rs.second > best_merged_size) {
180
43.0k
            if (rs.first <= _max_amplified_ratio ||
181
43.0k
                (_max_amplified_ratio < 1 && equivalent_size <= _equivalent_io_size)) {
182
43.0k
                best_merged_size = rs.second;
183
43.0k
            }
184
43.0k
        }
185
43.0k
    }
186
187
4.76k
    if (best_merged_size == to_read) {
188
        // read directly to avoid copy operation
189
1.06k
        SCOPED_RAW_TIMER(&_statistics.read_time);
190
1.06k
        size_t read_size = 0;
191
1.06k
        RETURN_IF_ERROR(_reader->read_at(offset + has_read, Slice(result.data + has_read, to_read),
192
1.06k
                                         &read_size, io_ctx));
193
1.06k
        *bytes_read = has_read + read_size;
194
1.06k
        _statistics.merged_io++;
195
1.06k
        _statistics.request_bytes += read_size;
196
1.06k
        _statistics.merged_bytes += read_size;
197
1.06k
        return Status::OK();
198
1.06k
    }
199
200
3.69k
    merge_start = offset + has_read;
201
3.69k
    size_t merge_read_size = 0;
202
3.69k
    RETURN_IF_ERROR(
203
3.69k
            _fill_box(range_index, merge_start, best_merged_size, &merge_read_size, io_ctx));
204
3.69k
    if (cached_data.start_offset != merge_start) {
205
0
        return Status::IOError("Wrong start offset in merged IO");
206
0
    }
207
208
    // read from cached data
209
3.69k
    size_t box_read_size = 0;
210
3.69k
    _read_in_box(cached_data, merge_start, Slice(result.data + has_read, to_read), &box_read_size);
211
3.69k
    *bytes_read = has_read + box_read_size;
212
3.69k
    _statistics.request_bytes += box_read_size;
213
3.69k
    if (*bytes_read < result.size && box_read_size < merge_read_size) {
214
0
        return Status::IOError("Can't read enough bytes in merged IO");
215
0
    }
216
3.69k
    return Status::OK();
217
3.69k
}
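
The loop above implements the plan described at source lines 162-163: keep extending the merged read while the ratio of hollow (gap) bytes to content bytes stays acceptable. A simplified, self-contained sketch of that selection (names are illustrative; the _remaining cap and the equivalent-IO-size rule of the real loop are omitted):

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Sketch: given the (slice_size, is_content) pairs produced by the merge scan,
    // return the largest merged size whose hollow/content ratio stays under the cap.
    size_t pick_best_merged_size(const std::vector<std::pair<size_t, bool>>& merged_slice,
                                 double max_amplified_ratio) {
        size_t content = 0, hollow = 0, best = 0;
        for (const auto& [size, is_content] : merged_slice) {
            if (!is_content) {
                hollow += size; // gap bytes we would read but not use
                continue;
            }
            if (size == 0) {
                continue;
            }
            content += size;
            if ((double)hollow / (double)content <= max_amplified_ratio) {
                best = content + hollow; // largest qualifying prefix so far
            }
        }
        return best;
    }
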
218
219
450k
int MergeRangeFileReader::_search_read_range(size_t start_offset, size_t end_offset) {
220
450k
    if (_random_access_ranges.empty()) {
221
0
        return -1;
222
0
    }
223
450k
    int left = 0, right = cast_set<int>(_random_access_ranges.size()) - 1;
224
1.25M
    do {
225
1.25M
        int mid = left + (right - left) / 2;
226
1.25M
        const PrefetchRange& range = _random_access_ranges[mid];
227
1.25M
        if (range.start_offset <= start_offset && start_offset < range.end_offset) {
228
450k
            if (range.start_offset <= end_offset && end_offset <= range.end_offset) {
229
450k
                return mid;
230
450k
            } else {
231
0
                return -1;
232
0
            }
233
801k
        } else if (range.start_offset > start_offset) {
234
281k
            right = mid - 1;
235
519k
        } else {
236
519k
            left = mid + 1;
237
519k
        }
238
1.25M
    } while (left <= right);
239
18.4E
    return -1;
240
450k
}
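
Because _random_access_ranges is kept sorted and non-overlapping, the hand-rolled binary search above can also be expressed with the standard library. A minimal sketch with illustrative types (unlike the real function, it only checks that the start offset is contained, not the end offset):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct Range {
        size_t start_offset;
        size_t end_offset; // exclusive
    };

    // Index of the sorted, non-overlapping range containing offset, or -1.
    int search_range(const std::vector<Range>& ranges, size_t offset) {
        auto it = std::upper_bound(
                ranges.begin(), ranges.end(), offset,
                [](size_t off, const Range& r) { return off < r.end_offset; });
        if (it != ranges.end() && it->start_offset <= offset) {
            return (int)(it - ranges.begin());
        }
        return -1;
    }
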
241
242
37.9k
void MergeRangeFileReader::_clean_cached_data(RangeCachedData& cached_data) {
243
37.9k
    if (!cached_data.empty()) {
244
0
        for (int i = 0; i < cached_data.ref_box.size(); ++i) {
245
0
            DCHECK_GT(cached_data.box_end_offset[i], cached_data.box_start_offset[i]);
246
0
            int16_t box_index = cached_data.ref_box[i];
247
0
            DCHECK_GT(_box_ref[box_index], 0);
248
0
            _box_ref[box_index]--;
249
0
        }
250
0
    }
251
37.9k
    cached_data.reset();
252
37.9k
}
253
254
39.4k
void MergeRangeFileReader::_dec_box_ref(int16_t box_index) {
255
39.4k
    if (--_box_ref[box_index] == 0) {
256
4.29k
        _remaining += BOX_SIZE;
257
4.29k
    }
258
39.4k
    if (box_index == _last_box_ref) {
259
3.16k
        _last_box_ref = -1;
260
3.16k
        _last_box_usage = 0;
261
3.16k
    }
262
39.4k
}
263
264
void MergeRangeFileReader::_read_in_box(RangeCachedData& cached_data, size_t offset, Slice result,
265
449k
                                        size_t* bytes_read) {
266
449k
    SCOPED_RAW_TIMER(&_statistics.copy_time);
267
453k
    auto handle_in_box = [&](size_t remaining, char* copy_out) {
268
453k
        size_t to_handle = remaining;
269
453k
        int cleaned_box = 0;
270
909k
        for (int i = 0; i < cached_data.ref_box.size() && remaining > 0; ++i) {
271
455k
            int16_t box_index = cached_data.ref_box[i];
272
455k
            size_t box_to_handle = std::min(remaining, (size_t)(cached_data.box_end_offset[i] -
273
455k
                                                                cached_data.box_start_offset[i]));
274
455k
            if (copy_out != nullptr) {
275
450k
            }
276
455k
            if (copy_out != nullptr) {
277
450k
                memcpy(copy_out + to_handle - remaining,
278
450k
                       _boxes[box_index].data() + cached_data.box_start_offset[i], box_to_handle);
279
450k
            }
280
455k
            remaining -= box_to_handle;
281
455k
            cached_data.box_start_offset[i] += box_to_handle;
282
455k
            if (cached_data.box_start_offset[i] == cached_data.box_end_offset[i]) {
283
39.4k
                cleaned_box++;
284
39.4k
                _dec_box_ref(box_index);
285
39.4k
            }
286
455k
        }
287
453k
        DCHECK_EQ(remaining, 0);
288
453k
        if (cleaned_box > 0) {
289
39.3k
            cached_data.ref_box.erase(cached_data.ref_box.begin(),
290
39.3k
                                      cached_data.ref_box.begin() + cleaned_box);
291
39.3k
            cached_data.box_start_offset.erase(cached_data.box_start_offset.begin(),
292
39.3k
                                               cached_data.box_start_offset.begin() + cleaned_box);
293
39.3k
            cached_data.box_end_offset.erase(cached_data.box_end_offset.begin(),
294
39.3k
                                             cached_data.box_end_offset.begin() + cleaned_box);
295
39.3k
        }
296
453k
        cached_data.start_offset += to_handle;
297
453k
        if (cached_data.start_offset == cached_data.end_offset) {
298
37.9k
            _clean_cached_data(cached_data);
299
37.9k
        }
300
453k
    };
301
302
449k
    if (offset > cached_data.start_offset) {
303
        // the data in range may be skipped
304
4.44k
        size_t to_skip = offset - cached_data.start_offset;
305
4.44k
        handle_in_box(to_skip, nullptr);
306
4.44k
    }
307
308
449k
    size_t to_read = std::min(cached_data.end_offset - cached_data.start_offset, result.size);
309
449k
    handle_in_box(to_read, result.data);
310
449k
    *bytes_read = to_read;
311
449k
}
312
313
Status MergeRangeFileReader::_fill_box(int range_index, size_t start_offset, size_t to_read,
314
3.70k
                                       size_t* bytes_read, const IOContext* io_ctx) {
315
3.70k
    if (!_read_slice) {
316
3.31k
        _read_slice = std::make_unique<OwnedSlice>(_merged_read_slice_size);
317
3.31k
    }
318
319
3.70k
    *bytes_read = 0;
320
3.70k
    {
321
3.70k
        SCOPED_RAW_TIMER(&_statistics.read_time);
322
3.70k
        RETURN_IF_ERROR(_reader->read_at(start_offset, Slice(_read_slice->data(), to_read),
323
3.70k
                                         bytes_read, io_ctx));
324
3.70k
        _statistics.merged_io++;
325
3.70k
        _statistics.merged_bytes += *bytes_read;
326
3.70k
    }
327
328
3.70k
    SCOPED_RAW_TIMER(&_statistics.copy_time);
329
3.70k
    size_t copy_start = start_offset;
330
3.70k
    const size_t copy_end = start_offset + *bytes_read;
331
    // copy data into small boxes
332
    // tuple(box_index, box_start_offset, file_start_offset, file_end_offset)
333
3.70k
    std::vector<std::tuple<int16_t, uint32_t, size_t, size_t>> filled_boxes;
334
335
43.9k
    auto fill_box = [&](int16_t fill_box_ref, uint32_t box_usage, size_t box_copy_end) {
336
43.9k
        size_t copy_size = std::min(box_copy_end - copy_start, BOX_SIZE - box_usage);
337
43.9k
        memcpy(_boxes[fill_box_ref].data() + box_usage,
338
43.9k
               _read_slice->data() + copy_start - start_offset, copy_size);
339
43.9k
        filled_boxes.emplace_back(fill_box_ref, box_usage, copy_start, copy_start + copy_size);
340
43.9k
        copy_start += copy_size;
341
43.9k
        _last_box_ref = fill_box_ref;
342
43.9k
        _last_box_usage = box_usage + cast_set<int>(copy_size);
343
43.9k
        _box_ref[fill_box_ref]++;
344
43.9k
        if (box_usage == 0) {
345
5.40k
            _remaining -= BOX_SIZE;
346
5.40k
        }
347
43.9k
    };
348
349
3.70k
    for (int fill_range_index = range_index;
350
45.6k
         fill_range_index < _random_access_ranges.size() && copy_start < copy_end;
351
41.9k
         ++fill_range_index) {
352
41.9k
        RangeCachedData& fill_range_cache = _range_cached_data[fill_range_index];
353
41.9k
        DCHECK(fill_range_cache.empty());
354
41.9k
        fill_range_cache.reset();
355
41.9k
        const PrefetchRange& fill_range = _random_access_ranges[fill_range_index];
356
41.9k
        if (fill_range.start_offset > copy_start) {
357
            // don't copy hollow data
358
21.2k
            size_t hollow_size = fill_range.start_offset - copy_start;
359
21.2k
            DCHECK_GT(copy_end - copy_start, hollow_size);
360
21.2k
            copy_start += hollow_size;
361
21.2k
        }
362
363
41.9k
        const size_t range_copy_end = std::min(copy_end, fill_range.end_offset);
364
        // reuse the remaining capacity of last box
365
41.9k
        if (_last_box_ref >= 0 && _last_box_usage < BOX_SIZE) {
366
38.5k
            fill_box(_last_box_ref, _last_box_usage, range_copy_end);
367
38.5k
        }
368
        // reuse a previously released box
369
49.4k
        for (int16_t i = 0; i < _boxes.size() && copy_start < range_copy_end; ++i) {
370
7.47k
            if (_box_ref[i] == 0) {
371
152
                fill_box(i, 0, range_copy_end);
372
152
            }
373
7.47k
        }
374
        // allocate a new box to copy data into
375
47.2k
        while (copy_start < range_copy_end && _boxes.size() < NUM_BOX) {
376
5.25k
            _boxes.emplace_back(BOX_SIZE);
377
5.25k
            _box_ref.emplace_back(0);
378
5.25k
            fill_box(cast_set<int16_t>(_boxes.size()) - 1, 0, range_copy_end);
379
5.25k
        }
380
41.9k
        DCHECK_EQ(copy_start, range_copy_end);
381
382
41.9k
        if (!filled_boxes.empty()) {
383
41.9k
            fill_range_cache.start_offset = std::get<2>(filled_boxes[0]);
384
41.9k
            fill_range_cache.end_offset = std::get<3>(filled_boxes.back());
385
43.9k
            for (auto& tuple : filled_boxes) {
386
43.9k
                fill_range_cache.ref_box.emplace_back(std::get<0>(tuple));
387
43.9k
                fill_range_cache.box_start_offset.emplace_back(std::get<1>(tuple));
388
43.9k
                fill_range_cache.box_end_offset.emplace_back(
389
43.9k
                        std::get<1>(tuple) + std::get<3>(tuple) - std::get<2>(tuple));
390
43.9k
            }
391
41.9k
            filled_boxes.clear();
392
41.9k
        }
393
41.9k
    }
394
3.70k
    return Status::OK();
395
3.70k
}
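
_fill_box distributes the merged read across fixed-size, reference-counted boxes in a fixed preference order: top up the last partially filled box, then recycle any box whose refcount has dropped to zero, then grow the pool up to NUM_BOX. A sketch of just that selection policy (names and types are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Returns the index of a box with free capacity, or -1 if the pool is exhausted.
    int16_t pick_box(std::vector<std::vector<char>>& boxes, std::vector<int16_t>& box_ref,
                     int16_t last_box, uint32_t last_usage, uint32_t box_size,
                     size_t num_box_limit) {
        if (last_box >= 0 && last_usage < box_size) {
            return last_box; // 1. finish filling the last partially used box
        }
        for (int16_t i = 0; i < (int16_t)boxes.size(); ++i) {
            if (box_ref[i] == 0) {
                return i; // 2. recycle a fully released box
            }
        }
        if (boxes.size() < num_box_limit) {
            boxes.emplace_back(box_size); // 3. grow the pool up to the limit
            box_ref.emplace_back(0);
            return (int16_t)(boxes.size() - 1);
        }
        return -1;
    }
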
396
397
// there are occasions where the buffer is already closed but
398
// some prior tasks are still queued in the thread pool, so we have to check whether
399
// the buffer is closed each time the condition variable is notified.
400
375
void PrefetchBuffer::reset_offset(size_t offset) {
401
375
    {
402
375
        std::unique_lock lck {_lock};
403
375
        if (!_prefetched.wait_for(
404
375
                    lck, std::chrono::milliseconds(config::buffered_reader_read_timeout_ms),
405
375
                    [this]() { return _buffer_status != BufferStatus::PENDING; })) {
406
0
            _prefetch_status = Status::TimedOut("time out when reset prefetch buffer");
407
0
            return;
408
0
        }
409
375
        if (UNLIKELY(_buffer_status == BufferStatus::CLOSED)) {
410
0
            _prefetched.notify_all();
411
0
            return;
412
0
        }
413
375
        _buffer_status = BufferStatus::RESET;
414
375
        _offset = offset;
415
375
        _prefetched.notify_all();
416
375
    }
417
375
    if (UNLIKELY(offset >= _file_range.end_offset)) {
418
199
        _len = 0;
419
199
        _exceed = true;
420
199
        return;
421
199
    } else {
422
176
        _exceed = false;
423
176
    }
424
    // Lazy-allocate the backing buffer in the calling (query) thread, which has a
425
    // MemTrackerLimiter attached. The prefetch thread pool threads are "Orphan" threads
426
    // without a tracker, so allocation must not happen there.
427
176
    if (_buf.empty()) {
428
148
        _buf.resize(_size);
429
148
    }
430
176
    _prefetch_status = ExecEnv::GetInstance()->buffered_reader_prefetch_thread_pool()->submit_func(
431
176
            [buffer_ptr = shared_from_this()]() { buffer_ptr->prefetch_buffer(); });
432
176
}
433
434
// only this function runs concurrently in another thread
435
176
void PrefetchBuffer::prefetch_buffer() {
436
176
    {
437
176
        std::unique_lock lck {_lock};
438
176
        if (!_prefetched.wait_for(
439
176
                    lck, std::chrono::milliseconds(config::buffered_reader_read_timeout_ms),
440
176
                    [this]() {
441
176
                        return _buffer_status == BufferStatus::RESET ||
442
176
                               _buffer_status == BufferStatus::CLOSED;
443
176
                    })) {
444
0
            _prefetch_status = Status::TimedOut("time out when invoking prefetch buffer");
445
0
            return;
446
0
        }
447
        // in case buffer is already closed
448
176
        if (UNLIKELY(_buffer_status == BufferStatus::CLOSED)) {
449
0
            _prefetched.notify_all();
450
0
            return;
451
0
        }
452
176
        _buffer_status = BufferStatus::PENDING;
453
176
        _prefetched.notify_all();
454
176
    }
455
456
0
    int read_range_index = search_read_range(_offset);
457
176
    size_t buf_size;
458
176
    if (read_range_index == -1) {
459
176
        buf_size =
460
176
                _file_range.end_offset - _offset > _size ? _size : _file_range.end_offset - _offset;
461
176
    } else {
462
0
        buf_size = merge_small_ranges(_offset, read_range_index);
463
0
    }
464
465
176
    _len = 0;
466
176
    Status s;
467
468
176
    {
469
176
        SCOPED_RAW_TIMER(&_statis.read_time);
470
176
        s = _reader->read_at(_offset, Slice {_buf.data(), buf_size}, &_len, _io_ctx);
471
176
    }
472
176
    if (UNLIKELY(s.ok() && buf_size != _len)) {
473
        // This indicates that the data size returned by S3 object storage is smaller than what we requested,
474
        // which seems to be a violation of the S3 protocol since our request range was valid.
475
        // We currently consider this situation a bug and will treat this task as a failure.
476
0
        s = Status::InternalError("Data size returned by S3 is smaller than requested");
477
0
        LOG(WARNING) << "Data size returned by S3 is smaller than requested" << _reader->path()
478
0
                     << " request bytes " << buf_size << " returned size " << _len;
479
0
    }
480
176
    g_bytes_downloaded << _len;
481
176
    _statis.prefetch_request_io += 1;
482
176
    _statis.prefetch_request_bytes += _len;
483
176
    std::unique_lock lck {_lock};
484
176
    if (!_prefetched.wait_for(lck,
485
176
                              std::chrono::milliseconds(config::buffered_reader_read_timeout_ms),
486
176
                              [this]() { return _buffer_status == BufferStatus::PENDING; })) {
487
0
        _prefetch_status = Status::TimedOut("time out when invoking prefetch buffer");
488
0
        return;
489
0
    }
490
176
    if (!s.ok() && _offset < _reader->size()) {
491
        // We should print the error msg since this buffer might not be accessed by the consumer
492
        // which would result in the status being missed
493
0
        LOG_WARNING("prefetch path {} failed, offset {}, error {}", _reader->path().native(),
494
0
                    _offset, s.to_string());
495
0
        _prefetch_status = std::move(s);
496
0
    }
497
176
    _buffer_status = BufferStatus::PREFETCHED;
498
176
    _prefetched.notify_all();
499
    // EOF comes up with len == 0; it is handled by read_buffer
500
176
}
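
reset_offset, prefetch_buffer, read_buffer, and close all guard the status transitions with the same pattern: wait on the condition variable, with a timeout, until the buffer reaches an expected status (or CLOSED), then move to the next status and notify. A minimal sketch of that pattern with standard types (the enum and member names are illustrative, not the real ones):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    enum class State { RESET, PENDING, PREFETCHED, CLOSED };

    struct GuardedBuffer {
        std::mutex lock;
        std::condition_variable cv;
        State state = State::RESET;

        // Wait until state == from (or CLOSED), then transition to `to`.
        // Returns false on timeout or if the buffer was closed meanwhile.
        bool transition(State from, State to, std::chrono::milliseconds timeout) {
            std::unique_lock lck{lock};
            if (!cv.wait_for(lck, timeout,
                             [&] { return state == from || state == State::CLOSED; })) {
                return false; // timed out
            }
            if (state == State::CLOSED) {
                cv.notify_all(); // as above: a queued task may observe CLOSED and must bail out
                return false;
            }
            state = to;
            cv.notify_all();
            return true;
        }
    };
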
501
502
176
int PrefetchBuffer::search_read_range(size_t off) const {
503
176
    if (_random_access_ranges == nullptr || _random_access_ranges->empty()) {
504
176
        return -1;
505
176
    }
506
0
    const std::vector<PrefetchRange>& random_access_ranges = *_random_access_ranges;
507
0
    int left = 0, right = cast_set<int>(random_access_ranges.size()) - 1;
508
0
    do {
509
0
        int mid = left + (right - left) / 2;
510
0
        const PrefetchRange& range = random_access_ranges[mid];
511
0
        if (range.start_offset <= off && range.end_offset > off) {
512
0
            return mid;
513
0
        } else if (range.start_offset > off) {
514
0
            right = mid;
515
0
        } else {
516
0
            left = mid + 1;
517
0
        }
518
0
    } while (left < right);
519
0
    if (random_access_ranges[right].start_offset > off) {
520
0
        return right;
521
0
    } else {
522
0
        return -1;
523
0
    }
524
0
}
525
526
0
size_t PrefetchBuffer::merge_small_ranges(size_t off, int range_index) const {
527
0
    if (_random_access_ranges == nullptr || _random_access_ranges->empty()) {
528
0
        return _size;
529
0
    }
530
0
    int64_t remaining = _size;
531
0
    const std::vector<PrefetchRange>& random_access_ranges = *_random_access_ranges;
532
0
    while (remaining > 0 && range_index < random_access_ranges.size()) {
533
0
        const PrefetchRange& range = random_access_ranges[range_index];
534
0
        if (range.start_offset <= off && range.end_offset > off) {
535
0
            remaining -= range.end_offset - off;
536
0
            off = range.end_offset;
537
0
            range_index++;
538
0
        } else if (range.start_offset > off) {
539
            // merge small range
540
0
            size_t hollow = range.start_offset - off;
541
0
            if (hollow < remaining) {
542
0
                remaining -= hollow;
543
0
                off = range.start_offset;
544
0
            } else {
545
0
                break;
546
0
            }
547
0
        } else {
548
0
            DCHECK(false);
549
0
        }
550
0
    }
551
0
    if (remaining < 0 || remaining == _size) {
552
0
        remaining = 0;
553
0
    }
554
0
    return _size - remaining;
555
0
}
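
A worked example with assumed numbers: with _size = 64 KB, ranges [0, 16K) and [20K, 40K), and off = 0, the first range consumes 16 KB of the budget, the 4 KB hollow between the ranges fits in the 48 KB that remain, and the second range consumes another 20 KB, so the function returns a merged read size of 16K + 4K + 20K = 40 KB.
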
556
557
Status PrefetchBuffer::read_buffer(size_t off, const char* out, size_t buf_len,
558
251
                                   size_t* bytes_read) {
559
251
    if (UNLIKELY(off >= _file_range.end_offset)) {
560
        // The reader may read outside [start_offset, end_offset) via the synchronous method.
561
0
        return _reader->read_at(off, Slice {out, buf_len}, bytes_read, _io_ctx);
562
0
    }
563
251
    if (_exceed) {
564
0
        reset_offset((off / _size) * _size);
565
0
        return read_buffer(off, out, buf_len, bytes_read);
566
0
    }
567
251
    {
568
251
        std::unique_lock lck {_lock};
569
        // buffer must be prefetched or it's closed
570
251
        if (!_prefetched.wait_for(
571
251
                    lck, std::chrono::milliseconds(config::buffered_reader_read_timeout_ms),
572
331
                    [this]() {
573
331
                        return _buffer_status == BufferStatus::PREFETCHED ||
574
331
                               _buffer_status == BufferStatus::CLOSED;
575
331
                    })) {
576
0
            _prefetch_status = Status::TimedOut("time out when read prefetch buffer");
577
0
            return _prefetch_status;
578
0
        }
579
251
        if (UNLIKELY(BufferStatus::CLOSED == _buffer_status)) {
580
0
            return Status::OK();
581
0
        }
582
251
    }
583
251
    RETURN_IF_ERROR(_prefetch_status);
584
    // only Parquet does non-sequential reads;
585
    // it reads the end of the file first
586
251
    if (UNLIKELY(!contains(off))) {
587
0
        reset_offset((off / _size) * _size);
588
0
        return read_buffer(off, out, buf_len, bytes_read);
589
0
    }
590
251
    if (UNLIKELY(0 == _len || _offset + _len < off)) {
591
0
        return Status::OK();
592
0
    }
593
594
251
    {
595
251
        LIMIT_REMOTE_SCAN_IO(bytes_read);
596
        // [0]: maximum len we try to read, [1]: maximum len the buffer can provide, [2]: actual len the buffer has
597
251
        size_t read_len = std::min({buf_len, _offset + _size - off, _offset + _len - off});
598
251
        {
599
251
            SCOPED_RAW_TIMER(&_statis.copy_time);
600
251
            memcpy((void*)out, _buf.data() + (off - _offset), read_len);
601
251
        }
602
251
        *bytes_read = read_len;
603
251
        _statis.request_io += 1;
604
251
        _statis.request_bytes += read_len;
605
251
    }
606
251
    if (off + *bytes_read == _offset + _len) {
607
135
        reset_offset(_offset + _whole_buffer_size);
608
135
    }
609
251
    return Status::OK();
610
251
}
611
612
240
void PrefetchBuffer::close() {
613
240
    std::unique_lock lck {_lock};
614
    // in case _reader still tries to write to the buf after we close the buffer
615
240
    if (!_prefetched.wait_for(lck,
616
240
                              std::chrono::milliseconds(config::buffered_reader_read_timeout_ms),
617
242
                              [this]() { return _buffer_status != BufferStatus::PENDING; })) {
618
0
        _prefetch_status = Status::TimedOut("time out when close prefetch buffer");
619
0
        return;
620
0
    }
621
240
    _buffer_status = BufferStatus::CLOSED;
622
240
    _prefetched.notify_all();
623
    // Explicitly release the backing buffer here, in the calling (query) thread which has a
624
    // MemTrackerLimiter. The destructor may run in the thread pool's Orphan thread (when the
625
    // last shared_ptr ref is released after the prefetch lambda completes), so we must not
626
    // rely on ~PODArray() to release memory — that would trigger memory_orphan_check().
627
240
    PODArray<char>().swap(_buf);
628
240
}
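
The final swap is the standard idiom for releasing a container's allocation immediately rather than at destruction time; the same idea shown with std::vector for clarity:

    #include <vector>

    std::vector<char> buf(1 << 20); // 1 MB backing allocation
    std::vector<char>().swap(buf);  // frees the allocation now, in this thread
    // buf.clear() alone would keep the capacity allocated; the swap releases it.
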
629
630
72
void PrefetchBuffer::_collect_profile_before_close() {
631
72
    if (_sync_profile != nullptr) {
632
72
        _sync_profile(*this);
633
72
    }
634
72
}
635
636
// buffered reader
637
PrefetchBufferedReader::PrefetchBufferedReader(RuntimeProfile* profile, io::FileReaderSPtr reader,
638
                                               PrefetchRange file_range,
639
                                               std::shared_ptr<const IOContext> io_ctx,
640
                                               int64_t buffer_size)
641
60
        : _reader(std::move(reader)), _file_range(file_range), _io_ctx_holder(std::move(io_ctx)) {
642
60
    if (_io_ctx_holder == nullptr) {
643
4
        _io_ctx_holder = std::make_shared<IOContext>();
644
4
    }
645
60
    _io_ctx = _io_ctx_holder.get();
646
60
    if (buffer_size == -1L) {
647
60
        buffer_size = config::remote_storage_read_buffer_mb * 1024 * 1024;
648
60
    }
649
60
    _size = _reader->size();
650
60
    _whole_pre_buffer_size = buffer_size;
651
60
    _file_range.end_offset = std::min(_file_range.end_offset, _size);
652
60
    int buffer_num = buffer_size > s_max_pre_buffer_size
653
60
                             ? cast_set<int>(buffer_size) / cast_set<int>(s_max_pre_buffer_size)
654
60
                             : 1;
655
60
    std::function<void(PrefetchBuffer&)> sync_buffer = nullptr;
656
60
    if (profile != nullptr) {
657
56
        const char* prefetch_buffered_reader = "PrefetchBufferedReader";
658
56
        ADD_TIMER(profile, prefetch_buffered_reader);
659
56
        auto copy_time = ADD_CHILD_TIMER(profile, "CopyTime", prefetch_buffered_reader);
660
56
        auto read_time = ADD_CHILD_TIMER(profile, "ReadTime", prefetch_buffered_reader);
661
56
        auto prefetch_request_io =
662
56
                ADD_CHILD_COUNTER(profile, "PreRequestIO", TUnit::UNIT, prefetch_buffered_reader);
663
56
        auto prefetch_request_bytes = ADD_CHILD_COUNTER(profile, "PreRequestBytes", TUnit::BYTES,
664
56
                                                        prefetch_buffered_reader);
665
56
        auto request_io =
666
56
                ADD_CHILD_COUNTER(profile, "RequestIO", TUnit::UNIT, prefetch_buffered_reader);
667
56
        auto request_bytes =
668
56
                ADD_CHILD_COUNTER(profile, "RequestBytes", TUnit::BYTES, prefetch_buffered_reader);
669
72
        sync_buffer = [=](PrefetchBuffer& buf) {
670
72
            COUNTER_UPDATE(copy_time, buf._statis.copy_time);
671
72
            COUNTER_UPDATE(read_time, buf._statis.read_time);
672
72
            COUNTER_UPDATE(prefetch_request_io, buf._statis.prefetch_request_io);
673
72
            COUNTER_UPDATE(prefetch_request_bytes, buf._statis.prefetch_request_bytes);
674
72
            COUNTER_UPDATE(request_io, buf._statis.request_io);
675
72
            COUNTER_UPDATE(request_bytes, buf._statis.request_bytes);
676
72
        };
677
56
    }
678
    // set the _cur_offset of this reader to be the same as the inner reader's,
679
    // to make sure the buffered reader starts reading at the right position.
680
300
    for (int i = 0; i < buffer_num; i++) {
681
240
        _pre_buffers.emplace_back(std::make_shared<PrefetchBuffer>(
682
240
                _file_range, s_max_pre_buffer_size, _whole_pre_buffer_size, _reader.get(),
683
240
                _io_ctx_holder, sync_buffer));
684
240
    }
685
60
}
686
687
60
PrefetchBufferedReader::~PrefetchBufferedReader() {
688
    /// Better not to call virtual functions in a destructor.
689
60
    static_cast<void>(_close_internal());
690
60
}
691
692
Status PrefetchBufferedReader::read_at_impl(size_t offset, Slice result, size_t* bytes_read,
693
208
                                            const IOContext* io_ctx) {
694
208
    if (!_initialized) {
695
60
        reset_all_buffer(offset);
696
60
        _initialized = true;
697
60
    }
698
208
    if (UNLIKELY(result.get_size() == 0 || offset >= size())) {
699
13
        *bytes_read = 0;
700
13
        return Status::OK();
701
13
    }
702
195
    size_t nbytes = result.get_size();
703
195
    int actual_bytes_read = 0;
704
446
    while (actual_bytes_read < nbytes && offset < size()) {
705
251
        size_t read_num = 0;
706
251
        auto buffer_pos = get_buffer_pos(offset);
707
251
        RETURN_IF_ERROR(
708
251
                _pre_buffers[buffer_pos]->read_buffer(offset, result.get_data() + actual_bytes_read,
709
251
                                                      nbytes - actual_bytes_read, &read_num));
710
251
        actual_bytes_read += read_num;
711
251
        offset += read_num;
712
251
    }
713
195
    *bytes_read = actual_bytes_read;
714
195
    return Status::OK();
715
195
}
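
read_at_impl walks forward through the file, and get_buffer_pos (declared elsewhere; not shown in this file) picks which prefetch buffer owns a given offset. Assuming each buffer covers one aligned, fixed-size window of the file, the mapping amounts to:

    #include <cstddef>

    // Sketch under that assumption; not the literal implementation.
    size_t get_buffer_pos(size_t offset, size_t buffer_size, size_t buffer_num) {
        return (offset / buffer_size) % buffer_num;
    }
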
716
717
14
Status PrefetchBufferedReader::close() {
718
14
    return _close_internal();
719
14
}
720
721
74
Status PrefetchBufferedReader::_close_internal() {
722
74
    if (!_closed) {
723
60
        _closed = true;
724
60
        std::for_each(_pre_buffers.begin(), _pre_buffers.end(),
725
240
                      [](std::shared_ptr<PrefetchBuffer>& buffer) { buffer->close(); });
726
60
        return _reader->close();
727
60
    }
728
729
14
    return Status::OK();
730
74
}
731
732
18
void PrefetchBufferedReader::_collect_profile_before_close() {
733
18
    std::for_each(_pre_buffers.begin(), _pre_buffers.end(),
734
72
                  [](std::shared_ptr<PrefetchBuffer>& buffer) {
735
72
                      buffer->collect_profile_before_close();
736
72
                  });
737
18
    if (_reader != nullptr) {
738
18
        _reader->collect_profile_before_close();
739
18
    }
740
18
}
741
742
// InMemoryFileReader
743
34.5k
InMemoryFileReader::InMemoryFileReader(io::FileReaderSPtr reader) : _reader(std::move(reader)) {
744
34.5k
    _size = _reader->size();
745
34.5k
}
746
747
34.5k
InMemoryFileReader::~InMemoryFileReader() {
748
34.5k
    static_cast<void>(_close_internal());
749
34.5k
}
750
751
460
Status InMemoryFileReader::close() {
752
460
    return _close_internal();
753
460
}
754
755
35.0k
Status InMemoryFileReader::_close_internal() {
756
35.0k
    if (!_closed) {
757
34.5k
        _closed = true;
758
34.5k
        return _reader->close();
759
34.5k
    }
760
456
    return Status::OK();
761
35.0k
}
762
763
Status InMemoryFileReader::read_at_impl(size_t offset, Slice result, size_t* bytes_read,
764
143k
                                        const IOContext* io_ctx) {
765
143k
    if (_data == nullptr) {
766
29.6k
        _data = std::make_unique_for_overwrite<char[]>(_size);
767
768
29.6k
        size_t file_size = 0;
769
29.6k
        RETURN_IF_ERROR(_reader->read_at(0, Slice(_data.get(), _size), &file_size, io_ctx));
770
29.6k
        DCHECK_EQ(file_size, _size);
771
29.6k
    }
772
143k
    if (UNLIKELY(offset > _size)) {
773
0
        return Status::IOError("Out of bounds access");
774
0
    }
775
143k
    *bytes_read = std::min(result.size, _size - offset);
776
143k
    memcpy(result.data, _data.get() + offset, *bytes_read);
777
143k
    return Status::OK();
778
143k
}
779
780
28.6k
void InMemoryFileReader::_collect_profile_before_close() {
781
28.6k
    if (_reader != nullptr) {
782
28.6k
        _reader->collect_profile_before_close();
783
28.6k
    }
784
28.6k
}
785
786
// BufferedFileStreamReader
787
BufferedFileStreamReader::BufferedFileStreamReader(io::FileReaderSPtr file, uint64_t offset,
788
                                                   uint64_t length, size_t max_buf_size)
789
174k
        : _file(file),
790
174k
          _file_start_offset(offset),
791
174k
          _file_end_offset(offset + length),
792
174k
          _max_buf_size(max_buf_size) {}
793
794
Status BufferedFileStreamReader::read_bytes(const uint8_t** buf, uint64_t offset,
795
807k
                                            const size_t bytes_to_read, const IOContext* io_ctx) {
796
807k
    if (offset < _file_start_offset || offset >= _file_end_offset ||
797
807k
        offset + bytes_to_read > _file_end_offset) {
798
2
        return Status::IOError(
799
2
                "Out-of-bounds Access: offset={}, bytes_to_read={}, file_start={}, "
800
2
                "file_end={}",
801
2
                offset, bytes_to_read, _file_start_offset, _file_end_offset);
802
2
    }
803
807k
    int64_t end_offset = offset + bytes_to_read;
804
807k
    if (_buf_start_offset <= offset && _buf_end_offset >= end_offset) {
805
329k
        *buf = _buf.get() + offset - _buf_start_offset;
806
329k
        return Status::OK();
807
329k
    }
808
477k
    size_t buf_size = std::max(_max_buf_size, bytes_to_read);
809
477k
    if (_buf_size < buf_size) {
810
75.6k
        auto new_buf = make_unique_buffer<uint8_t>(buf_size);
811
75.6k
        if (offset >= _buf_start_offset && offset < _buf_end_offset) {
812
8.27k
            memcpy(new_buf.get(), _buf.get() + offset - _buf_start_offset,
813
8.27k
                   _buf_end_offset - offset);
814
8.27k
        }
815
75.6k
        _buf = std::move(new_buf);
816
75.6k
        _buf_size = buf_size;
817
402k
    } else if (offset > _buf_start_offset && offset < _buf_end_offset) {
818
382k
        memmove(_buf.get(), _buf.get() + offset - _buf_start_offset, _buf_end_offset - offset);
819
382k
    }
820
477k
    if (offset < _buf_start_offset || offset >= _buf_end_offset) {
821
86.9k
        _buf_end_offset = offset;
822
86.9k
    }
823
477k
    _buf_start_offset = offset;
824
477k
    int64_t buf_remaining = _buf_end_offset - _buf_start_offset;
825
477k
    int64_t to_read = std::min(_buf_size - buf_remaining, _file_end_offset - _buf_end_offset);
826
477k
    int64_t has_read = 0;
827
955k
    while (has_read < to_read) {
828
477k
        size_t loop_read = 0;
829
477k
        Slice result(_buf.get() + buf_remaining + has_read, to_read - has_read);
830
477k
        RETURN_IF_ERROR(_file->read_at(_buf_end_offset + has_read, result, &loop_read, io_ctx));
831
477k
        if (loop_read == 0) {
832
0
            break;
833
0
        }
834
477k
        has_read += loop_read;
835
477k
    }
836
477k
    if (has_read != to_read) {
837
0
        return Status::Corruption("Try to read {} bytes, but received {} bytes", to_read, has_read);
838
0
    }
839
477k
    _buf_end_offset += to_read;
840
477k
    *buf = _buf.get();
841
477k
    return Status::OK();
842
477k
}
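
A usage sketch of the sliding window (file, file_size, and io_ctx are assumed to exist, and the file is assumed larger than the requests): the second call overlaps the window filled by the first, so it is served from the buffer without new IO.

    BufferedFileStreamReader stream(file, /*offset=*/0, /*length=*/file_size,
                                    /*max_buf_size=*/1 << 20);
    const uint8_t* data = nullptr;
    // Fills the window greedily (up to max_buf_size), returns a pointer into it.
    RETURN_IF_ERROR(stream.read_bytes(&data, /*offset=*/0, /*bytes_to_read=*/4096, io_ctx));
    // Overlaps the cached window: no additional read_at call.
    RETURN_IF_ERROR(stream.read_bytes(&data, /*offset=*/1024, /*bytes_to_read=*/2048, io_ctx));
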
843
844
Status BufferedFileStreamReader::read_bytes(Slice& slice, uint64_t offset,
845
312k
                                            const IOContext* io_ctx) {
846
312k
    return read_bytes((const uint8_t**)&slice.data, offset, slice.size, io_ctx);
847
312k
}
848
849
Result<io::FileReaderSPtr> DelegateReader::create_file_reader(
850
        RuntimeProfile* profile, const FileSystemProperties& system_properties,
851
        const FileDescription& file_description, const io::FileReaderOptions& reader_options,
852
75.1k
        AccessMode access_mode, const IOContext* io_ctx, const PrefetchRange file_range) {
853
75.1k
    std::shared_ptr<const IOContext> io_ctx_holder;
854
75.1k
    if (io_ctx != nullptr) {
855
        // Old API: best-effort safety by copying the IOContext onto the heap.
856
75.0k
        io_ctx_holder = std::make_shared<IOContext>(*io_ctx);
857
75.0k
    }
858
75.1k
    return create_file_reader(profile, system_properties, file_description, reader_options,
859
75.1k
                              access_mode, std::move(io_ctx_holder), file_range);
860
75.1k
}
861
862
Result<io::FileReaderSPtr> DelegateReader::create_file_reader(
863
        RuntimeProfile* profile, const FileSystemProperties& system_properties,
864
        const FileDescription& file_description, const io::FileReaderOptions& reader_options,
865
        AccessMode access_mode, std::shared_ptr<const IOContext> io_ctx,
866
76.4k
        const PrefetchRange file_range) {
867
76.4k
    if (io_ctx == nullptr) {
868
41
        io_ctx = std::make_shared<IOContext>();
869
41
    }
870
76.4k
    return FileFactory::create_file_reader(system_properties, file_description, reader_options,
871
76.4k
                                           profile)
872
76.5k
            .transform([&](auto&& reader) -> io::FileReaderSPtr {
873
76.5k
                if (reader->size() < config::in_memory_file_size &&
874
76.5k
                    typeid_cast<io::S3FileReader*>(reader.get())) {
875
34.5k
                    return std::make_shared<InMemoryFileReader>(std::move(reader));
876
34.5k
                }
877
878
42.0k
                if (access_mode == AccessMode::SEQUENTIAL) {
879
5.86k
                    bool is_thread_safe = false;
880
5.86k
                    if (typeid_cast<io::S3FileReader*>(reader.get())) {
881
56
                        is_thread_safe = true;
882
5.81k
                    } else if (auto* cached_reader =
883
5.81k
                                       typeid_cast<io::CachedRemoteFileReader*>(reader.get());
884
5.81k
                               cached_reader &&
885
5.81k
                               typeid_cast<io::S3FileReader*>(cached_reader->get_remote_reader())) {
886
0
                        is_thread_safe = true;
887
0
                    }
888
5.86k
                    if (is_thread_safe) {
889
                        // PrefetchBufferedReader needs a thread-safe reader to prefetch data concurrently.
890
56
                        return std::make_shared<io::PrefetchBufferedReader>(
891
56
                                profile, std::move(reader), file_range, io_ctx);
892
56
                    }
893
5.86k
                }
894
895
41.9k
                return reader;
896
42.0k
            });
897
76.4k
}
898
899
Status LinearProbeRangeFinder::get_range_for(int64_t desired_offset,
900
6
                                             io::PrefetchRange& result_range) {
901
9
    while (index < _ranges.size()) {
902
9
        io::PrefetchRange& range = _ranges[index];
903
9
        if (range.end_offset > desired_offset) {
904
6
            if (range.start_offset > desired_offset) [[unlikely]] {
905
0
                return Status::InvalidArgument("Invalid desiredOffset");
906
0
            }
907
6
            result_range = range;
908
6
            return Status::OK();
909
6
        }
910
3
        ++index;
911
3
    }
912
0
    return Status::InvalidArgument("Invalid desiredOffset");
913
6
}
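
Because the probe index only moves forward, callers must ask for offsets in non-decreasing order; probing backwards past a consumed range fails. A usage sketch with hypothetical ranges (the vector-of-ranges constructor is an assumption):

    LinearProbeRangeFinder finder({{0, 100}, {100, 250}});
    io::PrefetchRange range;
    RETURN_IF_ERROR(finder.get_range_for(10, range));  // -> [0, 100)
    RETURN_IF_ERROR(finder.get_range_for(150, range)); // cursor advances -> [100, 250)
    // finder.get_range_for(10, range) would now fail: the cursor never moves back.
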
914
915
RangeCacheFileReader::RangeCacheFileReader(RuntimeProfile* profile, io::FileReaderSPtr inner_reader,
916
                                           std::shared_ptr<RangeFinder> range_finder)
917
23.2k
        : _profile(profile),
918
23.2k
          _inner_reader(std::move(inner_reader)),
919
23.2k
          _range_finder(std::move(range_finder)) {
920
23.2k
    _size = _inner_reader->size();
921
23.2k
    uint64_t max_cache_size =
922
23.2k
            std::max((uint64_t)4096, (uint64_t)_range_finder->get_max_range_size());
923
23.2k
    _cache = OwnedSlice(max_cache_size);
924
925
23.3k
    if (_profile != nullptr) {
926
23.3k
        const char* random_profile = "RangeCacheFileReader";
927
23.3k
        ADD_TIMER_WITH_LEVEL(_profile, random_profile, 1);
928
23.3k
        _request_io =
929
23.3k
                ADD_CHILD_COUNTER_WITH_LEVEL(_profile, "RequestIO", TUnit::UNIT, random_profile, 1);
930
23.3k
        _request_bytes = ADD_CHILD_COUNTER_WITH_LEVEL(_profile, "RequestBytes", TUnit::BYTES,
931
23.3k
                                                      random_profile, 1);
932
23.3k
        _request_time = ADD_CHILD_TIMER_WITH_LEVEL(_profile, "RequestTime", random_profile, 1);
933
23.3k
        _read_to_cache_time =
934
23.3k
                ADD_CHILD_TIMER_WITH_LEVEL(_profile, "ReadToCacheTime", random_profile, 1);
935
23.3k
        _cache_refresh_count = ADD_CHILD_COUNTER_WITH_LEVEL(_profile, "CacheRefreshCount",
936
23.3k
                                                            TUnit::UNIT, random_profile, 1);
937
23.3k
        _read_to_cache_bytes = ADD_CHILD_COUNTER_WITH_LEVEL(_profile, "ReadToCacheBytes",
938
23.3k
                                                            TUnit::BYTES, random_profile, 1);
939
23.3k
    }
940
23.2k
}
941
942
Status RangeCacheFileReader::read_at_impl(size_t offset, Slice result, size_t* bytes_read,
943
6
                                          const IOContext* io_ctx) {
944
6
    auto request_size = result.size;
945
946
6
    _cache_statistics.request_io++;
947
6
    _cache_statistics.request_bytes += request_size;
948
6
    SCOPED_RAW_TIMER(&_cache_statistics.request_time);
949
950
6
    PrefetchRange range;
951
6
    if (_range_finder->get_range_for(offset, range)) [[likely]] {
952
6
        if (_current_start_offset != range.start_offset) { // need read new range to cache.
953
6
            auto range_size = range.end_offset - range.start_offset;
954
955
6
            _cache_statistics.cache_refresh_count++;
956
6
            _cache_statistics.read_to_cache_bytes += range_size;
957
6
            SCOPED_RAW_TIMER(&_cache_statistics.read_to_cache_time);
958
959
6
            Slice cache_slice = {_cache.data(), range_size};
960
6
            RETURN_IF_ERROR(
961
6
                    _inner_reader->read_at(range.start_offset, cache_slice, bytes_read, io_ctx));
962
963
6
            if (*bytes_read != range_size) [[unlikely]] {
964
0
                return Status::InternalError(
965
0
                        "RangeCacheFileReader use inner reader read bytes {} not eq expect size {}",
966
0
                        *bytes_read, range_size);
967
0
            }
968
969
6
            _current_start_offset = range.start_offset;
970
6
        }
971
972
6
        int64_t buffer_offset = offset - _current_start_offset;
973
6
        memcpy(result.data, _cache.data() + buffer_offset, request_size);
974
6
        *bytes_read = request_size;
975
976
6
        return Status::OK();
977
6
    } else {
978
0
        return Status::InternalError("RangeCacheFileReader read  not in Ranges. Offset = {}",
979
0
                                     offset);
980
        //                RETURN_IF_ERROR(_inner_reader->read_at(offset, result , bytes_read, io_ctx));
981
        //                return Status::OK();
982
        // Returning an error here is intentional; otherwise it would cover up the real error.
983
0
    }
984
6
}
985
986
23.3k
void RangeCacheFileReader::_collect_profile_before_close() {
987
23.3k
    if (_profile != nullptr) {
988
23.3k
        COUNTER_UPDATE(_request_io, _cache_statistics.request_io);
989
23.3k
        COUNTER_UPDATE(_request_bytes, _cache_statistics.request_bytes);
990
23.3k
        COUNTER_UPDATE(_request_time, _cache_statistics.request_time);
991
23.3k
        COUNTER_UPDATE(_read_to_cache_time, _cache_statistics.read_to_cache_time);
992
23.3k
        COUNTER_UPDATE(_cache_refresh_count, _cache_statistics.cache_refresh_count);
993
23.3k
        COUNTER_UPDATE(_read_to_cache_bytes, _cache_statistics.read_to_cache_bytes);
994
23.3k
        if (_inner_reader != nullptr) {
995
23.3k
            _inner_reader->collect_profile_before_close();
996
23.3k
        }
997
23.3k
    }
998
23.3k
}
999
1000
} // namespace io
1001
1002
} // namespace doris