Coverage Report

Created: 2026-03-24 10:39

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
be/src/io/cache/block_file_cache.h
Line
Count
Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
18
#pragma once
19
20
#include <bvar/bvar.h>
21
#include <concurrentqueue.h>
22
23
#include <algorithm>
24
#include <array>
25
#include <atomic>
26
#include <boost/lockfree/spsc_queue.hpp>
27
#include <functional>
28
#include <memory>
29
#include <mutex>
30
#include <optional>
31
#include <thread>
32
#include <unordered_map>
33
#include <vector>
34
35
#include "io/cache/block_file_cache_ttl_mgr.h"
36
#include "io/cache/cache_lru_dumper.h"
37
#include "io/cache/file_block.h"
38
#include "io/cache/file_cache_common.h"
39
#include "io/cache/file_cache_storage.h"
40
#include "io/cache/lru_queue_recorder.h"
41
#include "runtime/runtime_profile.h"
42
#include "util/threadpool.h"
43
44
namespace doris::io {
45
using RecycleFileCacheKeys = moodycamel::ConcurrentQueue<FileCacheKey>;
46
47
class LockScopedTimer {
48
public:
49
5.33k
    LockScopedTimer() : start_(std::chrono::steady_clock::now()) {}
50
5.33k
    ~LockScopedTimer() {
51
5.33k
        auto end = std::chrono::steady_clock::now();
52
5.33k
        auto duration_us =
53
5.33k
                std::chrono::duration_cast<std::chrono::microseconds>(end - start_).count();
54
5.33k
        if (duration_us > config::cache_lock_held_long_tail_threshold_us) {
55
0
            LOG(WARNING) << "Lock held time " << std::to_string(duration_us) << "us. "
56
0
                         << get_stack_trace();
57
0
        }
58
5.33k
    }
59
60
private:
61
    std::chrono::time_point<std::chrono::steady_clock> start_;
62
};
63
64
// Note: the cache_lock is scoped, so do not add do...while(0) here.
65
#define SCOPED_CACHE_LOCK(MUTEX, cache)                                                           \
66
5.33k
    std::chrono::time_point<std::chrono::steady_clock> start_time =                               \
67
5.33k
            std::chrono::steady_clock::now();                                                     \
68
5.33k
    std::lock_guard cache_lock(MUTEX);                                                            \
69
5.33k
    std::chrono::time_point<std::chrono::steady_clock> acq_time =                                 \
70
5.33k
            std::chrono::steady_clock::now();                                                     \
71
5.33k
    auto duration_us =                                                                            \
72
5.33k
            std::chrono::duration_cast<std::chrono::microseconds>(acq_time - start_time).count(); \
73
5.33k
    *(cache->_cache_lock_wait_time_us) << duration_us;                                            \
74
5.33k
    if (duration_us > config::cache_lock_wait_long_tail_threshold_us) {                           \
75
0
        LOG(WARNING) << "Lock wait time " << std::to_string(duration_us) << "us. "                \
76
0
                     << get_stack_trace() << std::endl;                                           \
77
0
    }                                                                                             \
78
5.33k
    LockScopedTimer cache_lock_timer;
79
80
class FSFileCacheStorage;
81
82
// NeedUpdateLRUBlocks keeps FileBlockSPtr entries that require LRU updates in a
83
// deduplicated, sharded container. Entries are keyed by the raw FileBlock
84
// pointer so that multiple shared_ptr copies of the same block are treated as a
85
// single pending update. The structure is thread-safe and optimized for high
86
// contention insert/drain workloads in the background update thread.
87
// Note that blocks are updated in batches; internal order is not important.
88
class NeedUpdateLRUBlocks {
89
public:
90
174
    NeedUpdateLRUBlocks() = default;
91
92
    // Insert a block into the pending set. Returns true only when the block
93
    // was not already queued. Null inputs are ignored.
94
    bool insert(FileBlockSPtr block);
95
96
    // Drain up to `limit` unique blocks into `output`. The method returns how
97
    // many blocks were actually drained and shrinks the internal size
98
    // accordingly.
99
    size_t drain(size_t limit, std::vector<FileBlockSPtr>* output);
100
101
    // Remove every pending block from the structure and reset the size.
102
    void clear();
103
104
    // Thread-safe approximate size of queued unique blocks.
105
10.3k
    size_t size() const { return _size.load(std::memory_order_relaxed); }
106
107
private:
108
    static constexpr size_t kShardCount = 64;
109
    static constexpr size_t kShardMask = kShardCount - 1;
110
111
    struct Shard {
112
        std::mutex mutex;
113
        std::unordered_map<FileBlock*, FileBlockSPtr> entries;
114
    };
115
116
    size_t shard_index(FileBlock* ptr) const;
117
118
    std::array<Shard, kShardCount> _shards;
119
    std::atomic<size_t> _size {0};
120
};
121
122
// The BlockFileCache is responsible for the management of the blocks
123
// The current strategies are lru and ttl.
124
125
struct FileBlockCell {
126
    friend class FileBlock;
127
128
    FileBlockSPtr file_block;
129
    /// Iterator is put here on first reservation attempt, if successful.
130
    std::optional<LRUQueue::Iterator> queue_iterator;
131
132
    mutable int64_t atime {0};
133
134
13.5k
    void update_atime() const {
135
13.5k
        atime = std::chrono::duration_cast<std::chrono::milliseconds>(
136
13.5k
                        std::chrono::steady_clock::now().time_since_epoch())
137
13.5k
                        .count();
138
13.5k
    }
139
140
    /// Pointer to file block is always held by the cache itself.
141
    /// Apart from the pointer in cache, it can be held by cache users when they call
142
    /// get_or_set(), but cache users always hold it via FileBlocksHolder.
143
604k
    bool releasable() const {
144
604k
        return (file_block.use_count() == 1 ||
145
604k
                (file_block.use_count() == 2 && file_block->_owned_by_cached_reader));
146
604k
    }
147
148
622k
    size_t size() const { return file_block->_block_range.size(); }
149
150
70
    FileBlockCell() = default;
151
    FileBlockCell(FileBlockSPtr file_block, std::lock_guard<std::mutex>& cache_lock);
152
    FileBlockCell(FileBlockCell&& other) noexcept
153
17.0k
            : file_block(std::move(other.file_block)),
154
17.0k
              queue_iterator(other.queue_iterator),
155
17.0k
              atime(other.atime) {
156
17.0k
        file_block->cell = this;
157
17.0k
    }
158
159
    FileBlockCell& operator=(const FileBlockCell&) = delete;
160
    FileBlockCell(const FileBlockCell&) = delete;
161
162
0
    size_t dowloading_size() const { return file_block->_downloaded_size; }
163
};
164
165
class BlockFileCache {
166
    friend class FSFileCacheStorage;
167
    friend class MemFileCacheStorage;
168
    friend class FileBlock;
169
    friend struct FileBlocksHolder;
170
    friend class CacheLRUDumper;
171
    friend class LRUQueueRecorder;
172
    friend struct FileBlockCell;
173
174
public:
175
    // hash the file_name to uint128
176
    static UInt128Wrapper hash(const std::string& path);
177
178
    BlockFileCache(const std::string& cache_base_path, const FileCacheSettings& cache_settings);
179
180
169
    virtual ~BlockFileCache() {
181
169
        {
182
169
            std::lock_guard lock(_close_mtx);
183
169
            _close = true;
184
169
        }
185
169
        _close_cv.notify_all();
186
169
        if (_cache_background_monitor_thread.joinable()) {
187
148
            _cache_background_monitor_thread.join();
188
148
        }
189
169
        if (_cache_background_gc_thread.joinable()) {
190
148
            _cache_background_gc_thread.join();
191
148
        }
192
169
        if (_cache_background_evict_in_advance_thread.joinable()) {
193
148
            _cache_background_evict_in_advance_thread.join();
194
148
        }
195
169
        if (_cache_background_lru_dump_thread.joinable()) {
196
148
            _cache_background_lru_dump_thread.join();
197
148
        }
198
169
        if (_cache_background_lru_log_replay_thread.joinable()) {
199
148
            _cache_background_lru_log_replay_thread.join();
200
148
        }
201
169
        if (_cache_background_block_lru_update_thread.joinable()) {
202
148
            _cache_background_block_lru_update_thread.join();
203
148
        }
204
169
        if (_ttl_mgr) {
205
132
            _ttl_mgr.reset();
206
132
        }
207
169
    }
208
209
    /// Restore cache from local filesystem.
210
    Status initialize();
211
212
    /// Cache capacity in bytes.
213
14
    [[nodiscard]] size_t capacity() const { return _capacity; }
214
215
    // try to release all releasable block
216
    // it maybe hang the io/system
217
    size_t try_release();
218
219
148
    [[nodiscard]] const std::string& get_base_path() const { return _cache_base_path; }
220
221
    // Get storage for inspection
222
0
    FileCacheStorage* get_storage() const { return _storage.get(); }
223
224
    /**
225
         * Given an `offset` and `size` representing [offset, offset + size) bytes interval,
226
         * return list of cached non-overlapping non-empty
227
         * file blocks `[block1, ..., blockN]` which intersect with given interval.
228
         *
229
         * blocks in returned list are ordered in ascending order and represent a full contiguous
230
         * interval (no holes). Each block in returned list has state: DOWNLOADED, DOWNLOADING or EMPTY.
231
         *
232
         * As long as pointers to returned file blocks are held
233
         * it is guaranteed that these file blocks are not removed from cache.
234
         */
235
    FileBlocksHolder get_or_set(const UInt128Wrapper& hash, size_t offset, size_t size,
236
                                CacheContext& context);
237
238
    /**
239
     * record blocks read directly by CachedRemoteFileReader
240
     */
241
    void add_need_update_lru_block(FileBlockSPtr block);
242
243
    /**
244
     * Clear all cached data for this cache instance async
245
     *
246
     * @returns summary message
247
     */
248
    std::string clear_file_cache_async();
249
    std::string clear_file_cache_directly();
250
251
    /**
252
     * Reset the cache capacity. If the new_capacity is smaller than _capacity, the redundant data will be remove async.
253
     *
254
     * @returns summary message
255
     */
256
    std::string reset_capacity(size_t new_capacity);
257
258
    std::map<size_t, FileBlockSPtr> get_blocks_by_key(const UInt128Wrapper& hash);
259
260
    /// For debug and UT
261
    std::string dump_structure(const UInt128Wrapper& hash);
262
    std::string dump_single_cache_type(const UInt128Wrapper& hash, size_t offset);
263
264
    void dump_lru_queues(bool force);
265
266
    [[nodiscard]] size_t get_used_cache_size(FileCacheType type) const;
267
268
    [[nodiscard]] size_t get_file_blocks_num(FileCacheType type) const;
269
270
    // change the block cache type
271
    void change_cache_type(const UInt128Wrapper& hash, size_t offset, FileCacheType new_type,
272
                           std::lock_guard<std::mutex>& cache_lock);
273
274
    // remove all blocks that belong to the key
275
    void remove_if_cached(const UInt128Wrapper& key);
276
    void remove_if_cached_async(const UInt128Wrapper& key);
277
278
    // Shrink the block size. old_size is always larger than new_size.
279
    void reset_range(const UInt128Wrapper&, size_t offset, size_t old_size, size_t new_size,
280
                     std::lock_guard<std::mutex>& cache_lock);
281
282
    // get the hotest blocks message by key
283
    // The tuple is composed of <offset, size, cache_type, expiration_time>
284
    [[nodiscard]] std::vector<std::tuple<size_t, size_t, FileCacheType, uint64_t>>
285
    get_hot_blocks_meta(const UInt128Wrapper& hash) const;
286
287
506
    [[nodiscard]] bool get_async_open_success() const { return _async_open_done; }
288
289
    BlockFileCache& operator=(const BlockFileCache&) = delete;
290
    BlockFileCache(const BlockFileCache&) = delete;
291
292
    // try to reserve the new space for the new block if the cache is full
293
    bool try_reserve(const UInt128Wrapper& hash, const CacheContext& context, size_t offset,
294
                     size_t size, std::lock_guard<std::mutex>& cache_lock);
295
296
    /**
297
     * Proactively evict cache blocks to free up space before cache is full.
298
     * 
299
     * This function attempts to evict blocks from both NORMAL and TTL queues to maintain 
300
     * cache size below high watermark. Unlike try_reserve() which blocks until space is freed,
301
     * this function initiates asynchronous eviction in background.
302
     * 
303
     * @param size Number of bytes to try to evict
304
     * @param cache_lock Lock that must be held while accessing cache data structures
305
     * 
306
     * @pre Caller must hold cache_lock
307
     * @pre _need_evict_cache_in_advance must be true
308
     * @pre _recycle_keys queue must have capacity for evicted blocks
309
     */
310
    void try_evict_in_advance(size_t size, std::lock_guard<std::mutex>& cache_lock);
311
312
    void update_ttl_atime(const UInt128Wrapper& hash);
313
314
    void pause_ttl_manager();
315
    void resume_ttl_manager();
316
317
    std::map<std::string, double> get_stats();
318
319
    // for be UTs
320
    std::map<std::string, double> get_stats_unsafe();
321
322
    using AccessRecord =
323
            std::unordered_map<AccessKeyAndOffset, LRUQueue::Iterator, KeyAndOffsetHash>;
324
325
    /// Used to track and control the cache access of each query.
326
    /// Through it, we can realize the processing of different queries by the cache layer.
327
    struct QueryFileCacheContext {
328
        LRUQueue lru_queue;
329
        AccessRecord records;
330
331
5
        QueryFileCacheContext(size_t max_cache_size) : lru_queue(max_cache_size, 0, 0) {}
332
333
        void remove(const UInt128Wrapper& hash, size_t offset,
334
                    std::lock_guard<std::mutex>& cache_lock);
335
336
        void reserve(const UInt128Wrapper& hash, size_t offset, size_t size,
337
                     std::lock_guard<std::mutex>& cache_lock);
338
339
69
        size_t get_max_cache_size() const { return lru_queue.get_max_size(); }
340
341
48
        size_t get_cache_size(std::lock_guard<std::mutex>& cache_lock) const {
342
48
            return lru_queue.get_capacity(cache_lock);
343
48
        }
344
345
50
        LRUQueue& queue() { return lru_queue; }
346
    };
347
348
    using QueryFileCacheContextPtr = std::shared_ptr<QueryFileCacheContext>;
349
    using QueryFileCacheContextMap = std::unordered_map<TUniqueId, QueryFileCacheContextPtr>;
350
351
    QueryFileCacheContextPtr get_query_context(const TUniqueId& query_id,
352
                                               std::lock_guard<std::mutex>&);
353
354
    void remove_query_context(const TUniqueId& query_id);
355
356
    QueryFileCacheContextPtr get_or_set_query_context(const TUniqueId& query_id,
357
                                                      std::lock_guard<std::mutex>& cache_lock,
358
                                                      int file_cache_query_limit_percent);
359
360
    /// Save a query context information, and adopt different cache policies
361
    /// for different queries through the context cache layer.
362
    struct QueryFileCacheContextHolder {
363
        QueryFileCacheContextHolder(const TUniqueId& query_id, BlockFileCache* mgr,
364
                                    QueryFileCacheContextPtr context)
365
7
                : query_id(query_id), mgr(mgr), context(context) {}
366
367
        QueryFileCacheContextHolder& operator=(const QueryFileCacheContextHolder&) = delete;
368
        QueryFileCacheContextHolder(const QueryFileCacheContextHolder&) = delete;
369
370
7
        ~QueryFileCacheContextHolder() {
371
            /// If only the query_map and the current holder hold the context_query,
372
            /// the query has been completed and the query_context is released.
373
7
            if (context) {
374
6
                context.reset();
375
6
                mgr->remove_query_context(query_id);
376
6
            }
377
7
        }
378
379
        const TUniqueId& query_id;
380
        BlockFileCache* mgr = nullptr;
381
        QueryFileCacheContextPtr context;
382
    };
383
    using QueryFileCacheContextHolderPtr = std::unique_ptr<QueryFileCacheContextHolder>;
384
    QueryFileCacheContextHolderPtr get_query_context_holder(const TUniqueId& query_id,
385
                                                            int file_cache_query_limit_percent);
386
387
0
    int64_t approximate_available_cache_size() const {
388
0
        return std::max<int64_t>(
389
0
                _cache_capacity_metrics->get_value() - _cur_cache_size_metrics->get_value(), 0);
390
0
    }
391
392
    Status report_file_cache_inconsistency(std::vector<std::string>& results);
393
    Status check_file_cache_consistency(InconsistencyContext& inconsistency_context);
394
395
private:
396
    LRUQueue& get_queue(FileCacheType type);
397
    const LRUQueue& get_queue(FileCacheType type) const;
398
399
    template <class T, class U>
400
        requires IsXLock<T> && IsXLock<U>
401
    void remove(FileBlockSPtr file_block, T& cache_lock, U& segment_lock, bool sync = true);
402
403
    FileBlocks get_impl(const UInt128Wrapper& hash, const CacheContext& context,
404
                        const FileBlock::Range& range, std::lock_guard<std::mutex>& cache_lock);
405
406
    template <class T>
407
        requires IsXLock<T>
408
    FileBlockCell* get_cell(const UInt128Wrapper& hash, size_t offset, T& cache_lock);
409
410
    virtual FileBlockCell* add_cell(const UInt128Wrapper& hash, const CacheContext& context,
411
                                    size_t offset, size_t size, FileBlock::State state,
412
                                    std::lock_guard<std::mutex>& cache_lock);
413
414
    Status initialize_unlocked(std::lock_guard<std::mutex>& cache_lock);
415
416
    void update_block_lru(FileBlockSPtr block, std::lock_guard<std::mutex>& cache_lock);
417
418
    void use_cell(FileBlockCell& cell, FileBlocks* result, bool not_need_move,
419
                  std::lock_guard<std::mutex>& cache_lock);
420
421
    bool try_reserve_for_lru(const UInt128Wrapper& hash, QueryFileCacheContextPtr query_context,
422
                             const CacheContext& context, size_t offset, size_t size,
423
                             std::lock_guard<std::mutex>& cache_lock,
424
                             bool evict_in_advance = false);
425
426
    bool try_reserve_during_async_load(size_t size, std::lock_guard<std::mutex>& cache_lock);
427
428
    std::vector<FileCacheType> get_other_cache_type(FileCacheType cur_cache_type);
429
    std::vector<FileCacheType> get_other_cache_type_without_ttl(FileCacheType cur_cache_type);
430
431
    bool try_reserve_from_other_queue(FileCacheType cur_cache_type, size_t offset, int64_t cur_time,
432
                                      std::lock_guard<std::mutex>& cache_lock,
433
                                      bool evict_in_advance = false);
434
435
    size_t get_available_cache_size(FileCacheType cache_type) const;
436
437
    FileBlocks split_range_into_cells(const UInt128Wrapper& hash, const CacheContext& context,
438
                                      size_t offset, size_t size, FileBlock::State state,
439
                                      std::lock_guard<std::mutex>& cache_lock);
440
441
    std::string dump_structure_unlocked(const UInt128Wrapper& hash,
442
                                        std::lock_guard<std::mutex>& cache_lock);
443
444
    std::string dump_single_cache_type_unlocked(const UInt128Wrapper& hash, size_t offset,
445
                                                std::lock_guard<std::mutex>& cache_lock);
446
447
    void fill_holes_with_empty_file_blocks(FileBlocks& file_blocks, const UInt128Wrapper& hash,
448
                                           const CacheContext& context,
449
                                           const FileBlock::Range& range,
450
                                           std::lock_guard<std::mutex>& cache_lock);
451
452
    size_t get_used_cache_size_unlocked(FileCacheType type,
453
                                        std::lock_guard<std::mutex>& cache_lock) const;
454
455
    void check_disk_resource_limit();
456
    void check_need_evict_cache_in_advance();
457
458
    size_t get_available_cache_size_unlocked(FileCacheType type,
459
                                             std::lock_guard<std::mutex>& cache_lock) const;
460
461
    size_t get_file_blocks_num_unlocked(FileCacheType type,
462
                                        std::lock_guard<std::mutex>& cache_lock) const;
463
464
    bool need_to_move(FileCacheType cell_type, FileCacheType query_type) const;
465
466
    void run_background_monitor();
467
    void run_background_gc();
468
    void run_background_lru_log_replay();
469
    void run_background_lru_dump();
470
    void restore_lru_queues_from_disk(std::lock_guard<std::mutex>& cache_lock);
471
    void run_background_evict_in_advance();
472
    void run_background_block_lru_update();
473
474
    bool try_reserve_from_other_queue_by_time_interval(FileCacheType cur_type,
475
                                                       std::vector<FileCacheType> other_cache_types,
476
                                                       size_t size, int64_t cur_time,
477
                                                       std::lock_guard<std::mutex>& cache_lock,
478
                                                       bool evict_in_advance);
479
480
    bool try_reserve_from_other_queue_by_size(FileCacheType cur_type,
481
                                              std::vector<FileCacheType> other_cache_types,
482
                                              size_t size, std::lock_guard<std::mutex>& cache_lock,
483
                                              bool evict_in_advance);
484
485
    bool is_overflow(size_t removed_size, size_t need_size, size_t cur_cache_size,
486
                     bool evict_in_advance) const;
487
488
    void remove_file_blocks(std::vector<FileBlockCell*>&, std::lock_guard<std::mutex>&, bool sync,
489
                            std::string& reason);
490
491
    void find_evict_candidates(LRUQueue& queue, size_t size, size_t cur_cache_size,
492
                               size_t& removed_size, std::vector<FileBlockCell*>& to_evict,
493
                               std::lock_guard<std::mutex>& cache_lock, size_t& cur_removed_size,
494
                               bool evict_in_advance);
495
496
    Status check_ofstream_status(std::ofstream& out, std::string& filename);
497
    Status dump_one_lru_entry(std::ofstream& out, std::string& filename, const UInt128Wrapper& hash,
498
                              size_t offset, size_t size);
499
    Status finalize_dump(std::ofstream& out, size_t entry_num, std::string& tmp_filename,
500
                         std::string& final_filename, size_t& file_size);
501
    Status check_ifstream_status(std::ifstream& in, std::string& filename);
502
    Status parse_dump_footer(std::ifstream& in, std::string& filename, size_t& entry_num);
503
    Status parse_one_lru_entry(std::ifstream& in, std::string& filename, UInt128Wrapper& hash,
504
                               size_t& offset, size_t& size);
505
    void remove_lru_dump_files();
506
507
    void clear_need_update_lru_blocks();
508
509
    // info
510
    std::string _cache_base_path;
511
    size_t _capacity = 0;
512
    size_t _max_file_block_size = 0;
513
514
    mutable std::mutex _mutex;
515
    bool _close {false};
516
    std::mutex _close_mtx;
517
    std::condition_variable _close_cv;
518
    std::thread _cache_background_monitor_thread;
519
    std::thread _cache_background_gc_thread;
520
    std::thread _cache_background_evict_in_advance_thread;
521
    std::thread _cache_background_lru_dump_thread;
522
    std::thread _cache_background_lru_log_replay_thread;
523
    std::thread _cache_background_block_lru_update_thread;
524
    std::atomic_bool _async_open_done {false};
525
    // disk space or inode is less than the specified value
526
    bool _disk_resource_limit_mode {false};
527
    bool _need_evict_cache_in_advance {false};
528
    bool _is_initialized {false};
529
530
    // strategy
531
    using FileBlocksByOffset = std::map<size_t, FileBlockCell>;
532
    using CachedFiles = std::unordered_map<UInt128Wrapper, FileBlocksByOffset, KeyHash>;
533
    CachedFiles _files;
534
    QueryFileCacheContextMap _query_map;
535
    size_t _cur_cache_size = 0;
536
    size_t _cur_ttl_size = 0;
537
    std::multimap<uint64_t, UInt128Wrapper> _time_to_key;
538
    std::unordered_map<UInt128Wrapper, uint64_t, KeyHash> _key_to_time;
539
    // These three queues act as level queues,
540
    // i.e. level1/level2/level3 queues,
541
    // with level2 being the largest.
542
    // If some data is important, we can cache it in the index queue.
543
    // If some data is used only once, we can cache it in the disposable queue.
544
    // The size proportion is [1:17:2].
545
    LRUQueue _index_queue;
546
    LRUQueue _normal_queue;
547
    LRUQueue _disposable_queue;
548
    LRUQueue _ttl_queue;
549
    LRUQueue _cold_normal_queue;
550
551
    // keys for async remove
552
    RecycleFileCacheKeys _recycle_keys;
553
554
    std::unique_ptr<LRUQueueRecorder> _lru_recorder;
555
    std::unique_ptr<CacheLRUDumper> _lru_dumper;
556
    std::unique_ptr<BlockFileCacheTtlMgr> _ttl_mgr;
557
558
    // metrics
559
    std::shared_ptr<bvar::Status<size_t>> _cache_capacity_metrics;
560
    std::shared_ptr<bvar::Status<size_t>> _cur_cache_size_metrics;
561
    std::shared_ptr<bvar::Status<size_t>> _cur_ttl_cache_size_metrics;
562
    std::shared_ptr<bvar::Status<size_t>> _cur_ttl_cache_lru_queue_cache_size_metrics;
563
    std::shared_ptr<bvar::Status<size_t>> _cur_ttl_cache_lru_queue_element_count_metrics;
564
    std::shared_ptr<bvar::Status<size_t>> _cur_normal_queue_element_count_metrics;
565
    std::shared_ptr<bvar::Status<size_t>> _cur_normal_queue_cache_size_metrics;
566
    std::shared_ptr<bvar::Status<size_t>> _cur_cold_normal_queue_element_count_metrics;
567
    std::shared_ptr<bvar::Status<size_t>> _cur_cold_normal_queue_cache_size_metrics;
568
    std::shared_ptr<bvar::Status<size_t>> _cur_index_queue_element_count_metrics;
569
    std::shared_ptr<bvar::Status<size_t>> _cur_index_queue_cache_size_metrics;
570
    std::shared_ptr<bvar::Status<size_t>> _cur_disposable_queue_element_count_metrics;
571
    std::shared_ptr<bvar::Status<size_t>> _cur_disposable_queue_cache_size_metrics;
572
    std::array<std::shared_ptr<bvar::Adder<size_t>>, 5> _queue_evict_size_metrics;
573
    std::shared_ptr<bvar::Adder<size_t>> _total_read_size_metrics;
574
    std::shared_ptr<bvar::Adder<size_t>> _total_hit_size_metrics;
575
    std::shared_ptr<bvar::Adder<size_t>> _total_evict_size_metrics;
576
    std::shared_ptr<bvar::Adder<size_t>> _gc_evict_bytes_metrics;
577
    std::shared_ptr<bvar::Adder<size_t>> _gc_evict_count_metrics;
578
    std::shared_ptr<bvar::Adder<size_t>> _evict_by_time_metrics_matrix[5][5];
579
    std::shared_ptr<bvar::Adder<size_t>> _evict_by_size_metrics_matrix[5][5];
580
    std::shared_ptr<bvar::Adder<size_t>> _evict_by_self_lru_metrics_matrix[5];
581
    std::shared_ptr<bvar::Adder<size_t>> _evict_by_try_release;
582
583
    std::shared_ptr<bvar::Window<bvar::Adder<size_t>>> _num_hit_blocks_5m;
584
    std::shared_ptr<bvar::Window<bvar::Adder<size_t>>> _num_read_blocks_5m;
585
    std::shared_ptr<bvar::Window<bvar::Adder<size_t>>> _num_hit_blocks_1h;
586
    std::shared_ptr<bvar::Window<bvar::Adder<size_t>>> _num_read_blocks_1h;
587
588
    std::shared_ptr<bvar::Adder<size_t>> _num_read_blocks;
589
    std::shared_ptr<bvar::Adder<size_t>> _num_hit_blocks;
590
    std::shared_ptr<bvar::Adder<size_t>> _num_removed_blocks;
591
592
    std::shared_ptr<bvar::Adder<size_t>> _no_warmup_num_read_blocks;
593
    std::shared_ptr<bvar::Adder<size_t>> _no_warmup_num_hit_blocks;
594
595
    std::shared_ptr<bvar::Window<bvar::Adder<size_t>>> _no_warmup_num_hit_blocks_5m;
596
    std::shared_ptr<bvar::Window<bvar::Adder<size_t>>> _no_warmup_num_read_blocks_5m;
597
    std::shared_ptr<bvar::Window<bvar::Adder<size_t>>> _no_warmup_num_hit_blocks_1h;
598
    std::shared_ptr<bvar::Window<bvar::Adder<size_t>>> _no_warmup_num_read_blocks_1h;
599
600
    std::shared_ptr<bvar::Status<double>> _hit_ratio;
601
    std::shared_ptr<bvar::Status<double>> _hit_ratio_5m;
602
    std::shared_ptr<bvar::Status<double>> _hit_ratio_1h;
603
    std::shared_ptr<bvar::Status<double>> _no_warmup_hit_ratio;
604
    std::shared_ptr<bvar::Status<double>> _no_warmup_hit_ratio_5m;
605
    std::shared_ptr<bvar::Status<double>> _no_warmup_hit_ratio_1h;
606
    std::shared_ptr<bvar::Status<size_t>> _disk_limit_mode_metrics;
607
    std::shared_ptr<bvar::Status<size_t>> _need_evict_cache_in_advance_metrics;
608
    std::shared_ptr<bvar::Status<size_t>> _meta_store_write_queue_size_metrics;
609
610
    std::shared_ptr<bvar::LatencyRecorder> _cache_lock_wait_time_us;
611
    std::shared_ptr<bvar::LatencyRecorder> _get_or_set_latency_us;
612
    std::shared_ptr<bvar::LatencyRecorder> _storage_sync_remove_latency_us;
613
    std::shared_ptr<bvar::LatencyRecorder> _storage_retry_sync_remove_latency_us;
614
    std::shared_ptr<bvar::LatencyRecorder> _storage_async_remove_latency_us;
615
    std::shared_ptr<bvar::LatencyRecorder> _evict_in_advance_latency_us;
616
    std::shared_ptr<bvar::LatencyRecorder> _recycle_keys_length_recorder;
617
    std::shared_ptr<bvar::LatencyRecorder> _update_lru_blocks_latency_us;
618
    std::shared_ptr<bvar::LatencyRecorder> _need_update_lru_blocks_length_recorder;
619
    std::shared_ptr<bvar::LatencyRecorder> _ttl_gc_latency_us;
620
621
    std::shared_ptr<bvar::LatencyRecorder> _shadow_queue_levenshtein_distance;
622
    // keep _storage last so it will be destructed first;
623
    // otherwise, load_cache_info_into_memory might crash
624
    // because it uses other members of BlockFileCache,
625
    // so join this async load thread first
626
    std::unique_ptr<FileCacheStorage> _storage;
627
    std::shared_ptr<bvar::LatencyRecorder> _lru_dump_latency_us;
628
    std::mutex _dump_lru_queues_mtx;
629
    NeedUpdateLRUBlocks _need_update_lru_blocks;
630
};
631
632
} // namespace doris::io