/root/doris/be/src/olap/lru_cache.h
| Line | Count | Source | 
| 1 |  | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. | 
| 2 |  | // Use of this source code is governed by a BSD-style license that can be | 
| 3 |  | // found in the LICENSE file. See the AUTHORS file for names of contributors. | 
| 4 |  |  | 
| 5 |  | #pragma once | 
| 6 |  |  | 
| 7 |  | #include <butil/macros.h> | 
| 8 |  | #include <bvar/bvar.h> | 
| 9 |  | #include <glog/logging.h> | 
| 10 |  | #include <gtest/gtest_prod.h> | 
| 11 |  |  | 
| 12 |  | #include <atomic> | 
| 13 |  | #include <cassert> | 
| 14 |  | #include <cstdint> | 
| 15 |  | #include <cstdlib> | 
| 16 |  | #include <cstring> | 
| 17 |  | #include <functional> | 
| 18 |  | #include <memory> | 
| 19 |  | #include <set> | 
| 20 |  | #include <string> | 
| 21 |  | #include <utility> | 
| 22 |  |  | 
| 23 |  | #include "runtime/memory/lru_cache_value_base.h" | 
| 24 |  | #include "util/doris_metrics.h" | 
| 25 |  | #include "util/metrics.h" | 
| 26 |  |  | 
| 27 |  | namespace doris { | 
| 28 |  | #include "common/compile_check_begin.h" | 
| 29 |  |  | 
| 30 |  | class Cache; | 
| 31 |  | class LRUCachePolicy; | 
| 32 |  | struct LRUHandle; | 
| 33 |  |  | 
| 34 |  | enum LRUCacheType { | 
| 35 |  |     SIZE, // Cache capacity is measured by the memory size of cache entries; memory size = handle size + charge. | 
| 36 |  |     NUMBER // Cache capacity is measured by the number of cache entries; charge is the weight (count) of an entry. | 
| 37 |  | }; | 
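
The difference between the two types is how `charge` is accounted against capacity. A minimal sketch with assumed numbers (the 96-byte handle overhead, 4 KiB value charge, and capacities below are illustrative, not values taken from Doris):

```cpp
#include <cstddef>
#include <cstdio>

int main() {
    // LRUCacheType::SIZE: each entry consumes (handle size + charge) bytes of capacity.
    constexpr size_t kAssumedHandleBytes = 96;   // hypothetical per-entry overhead
    constexpr size_t kValueCharge = 4096;        // charge passed to insert(), in bytes
    constexpr size_t kSizeCapacity = 1 << 20;    // a 1 MiB SIZE cache
    size_t max_entries_size_cache = kSizeCapacity / (kAssumedHandleBytes + kValueCharge);

    // LRUCacheType::NUMBER: charge is the entry's weight; with charge = 1 the
    // capacity is simply the maximum number of entries.
    constexpr size_t kNumberCapacity = 1024;
    size_t max_entries_number_cache = kNumberCapacity / 1;

    std::printf("SIZE cache: ~%zu entries, NUMBER cache: %zu entries\n",
                max_entries_size_cache, max_entries_number_cache);
    return 0;
}
```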
| 38 |  |  | 
| 39 |  | static constexpr LRUCacheType DEFAULT_LRU_CACHE_TYPE = LRUCacheType::SIZE; | 
| 40 |  | static constexpr uint32_t DEFAULT_LRU_CACHE_NUM_SHARDS = 32; | 
| 41 |  | static constexpr size_t DEFAULT_LRU_CACHE_ELEMENT_COUNT_CAPACITY = 0; | 
| 42 |  | static constexpr bool DEFAULT_LRU_CACHE_IS_LRU_K = false; | 
| 43 |  |  | 
| 44 |  | class CacheKey { | 
| 45 |  | public: | 
| 46 | 0 |     CacheKey() : _size(0) {} | 
| 47 |  |     // Create a slice that refers to d[0,n-1]. | 
| 48 | 1.28M |     CacheKey(const char* d, size_t n) : _data(d), _size(n) {} | 
| 49 |  |  | 
| 50 |  |     // Create a slice that refers to the contents of "s" | 
| 51 | 44.6k |     CacheKey(const std::string& s) : _data(s.data()), _size(s.size()) {} | 
| 52 |  |  | 
| 53 |  |     // Create a slice that refers to s[0,strlen(s)-1] | 
| 54 | 50 |     CacheKey(const char* s) : _data(s), _size(strlen(s)) {} | 
| 55 |  |  | 
| 56 |  |     ~CacheKey() = default; | 
| 57 |  |  | 
| 58 |  |     // Return a pointer to the beginning of the referenced data | 
| 59 | 1.95M |     const char* data() const { return _data; } | 
| 60 |  |  | 
| 61 |  |     // Return the length (in bytes) of the referenced data | 
| 62 | 3.26M |     size_t size() const { return _size; } | 
| 63 |  |  | 
| 64 |  |     // Return true if the length of the referenced data is zero | 
| 65 | 0 |     bool empty() const { return _size == 0; } | 
| 66 |  |  | 
| 67 |  |     // Return the ith byte in the referenced data. | 
| 68 |  |     // REQUIRES: n < size() | 
| 69 | 0 |     char operator[](size_t n) const { | 
| 70 | 0 |         assert(n < size()); | 
| 71 | 0 |         return _data[n]; | 
| 72 | 0 |     } | 
| 73 |  |  | 
| 74 |  |     // Change this slice to refer to an empty array | 
| 75 | 0 |     void clear() { | 
| 76 | 0 |         _data = nullptr; | 
| 77 | 0 |         _size = 0; | 
| 78 | 0 |     } | 
| 79 |  |  | 
| 80 |  |     // Drop the first "n" bytes from this slice. | 
| 81 | 0 |     void remove_prefix(size_t n) { | 
| 82 | 0 |         assert(n <= size()); | 
| 83 | 0 |         _data += n; | 
| 84 | 0 |         _size -= n; | 
| 85 | 0 |     } | 
| 86 |  |  | 
| 87 |  |     // Return a string that contains the copy of the referenced data. | 
| 88 | 0 |     std::string to_string() const { return {_data, _size}; } | 
| 89 |  |  | 
| 90 | 320k |     bool operator==(const CacheKey& other) const { | 
| 91 | 320k |         return ((size() == other.size()) && (memcmp(data(), other.data(), size()) == 0)); | 
| 92 | 320k |     } | 
| 93 |  |  | 
| 94 | 320k |     bool operator!=(const CacheKey& other) const { return !(*this == other); } | 
| 95 |  |  | 
| 96 | 0 |     int compare(const CacheKey& b) const { | 
| 97 | 0 |         const size_t min_len = (_size < b._size) ? _size : b._size; | 
| 98 | 0 |         int r = memcmp(_data, b._data, min_len); | 
| 99 | 0 |         if (r == 0) { | 
| 100 | 0 |             if (_size < b._size) { | 
| 101 | 0 |                 r = -1; | 
| 102 | 0 |             } else if (_size > b._size) { | 
| 103 | 0 |                 r = +1; | 
| 104 | 0 |             } | 
| 105 | 0 |         } | 
| 106 | 0 |         return r; | 
| 107 | 0 |     } | 
| 108 |  |  | 
| 109 |  |     uint32_t hash(const char* data, size_t n, uint32_t seed) const; | 
| 110 |  |  | 
| 111 |  |     // Return true if "x" is a prefix of "*this" | 
| 112 | 0 |     bool starts_with(const CacheKey& x) const { | 
| 113 | 0 |         return ((_size >= x._size) && (memcmp(_data, x._data, x._size) == 0)); | 
| 114 | 0 |     } | 
| 115 |  |  | 
| 116 |  | private: | 
| 117 | 1.19M |     uint32_t _decode_fixed32(const char* ptr) const { | 
| 118 |  |         // Load the raw bytes | 
| 119 | 1.19M |         uint32_t result; | 
| 120 | 1.19M |         memcpy(&result, ptr, sizeof(result)); // gcc optimizes this to a plain load | 
| 121 | 1.19M |         return result; | 
| 122 | 1.19M |     } | 
| 123 |  |  | 
| 124 |  |     const char* _data = nullptr; | 
| 125 |  |     size_t _size; | 
| 126 |  | }; | 
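
Since CacheKey is a non-owning slice (it stores only a pointer and a length), the referenced buffer must outlive the key. A small usage sketch built from the methods declared above; the key strings are made up:

```cpp
#include <cassert>
#include <string>

#include "olap/lru_cache.h" // doris::CacheKey

void cache_key_example() {
    std::string owner = "rowset_123_segment_7"; // hypothetical key material
    doris::CacheKey key(owner);                 // refers to owner's buffer, no copy
    doris::CacheKey prefix("rowset_123");       // refers to the string literal

    assert(key.size() == owner.size());
    assert(key.starts_with(prefix));            // "rowset_123" is a prefix of key
    assert(key != prefix);
    assert(key.to_string() == owner);           // to_string() copies the bytes
}
```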
| 127 |  |  | 
| 128 |  | // Entries with a smaller CachePriority value are evicted first. | 
| 129 |  | enum class CachePriority { NORMAL = 0, DURABLE = 1 }; | 
| 130 |  |  | 
| 131 |  | using CachePrunePredicate = std::function<bool(const LRUHandle*)>; | 
| 132 |  | // CacheValueTimeExtractor extracts a timestamp from a cache value | 
| 133 |  | // through a user-specified function, e.g. last_visit_time in | 
| 134 |  | // InvertedIndexSearcherCache::CacheValue. | 
| 135 |  | using CacheValueTimeExtractor = std::function<int64_t(const void*)>; | 
| 136 |  | struct PrunedInfo { | 
| 137 |  |     int64_t pruned_count = 0; | 
| 138 |  |     int64_t pruned_size = 0; | 
| 139 |  | }; | 
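
As a rough sketch of how these aliases are meant to be used with prune_if() below (the idle threshold and its unit are assumptions for illustration; `last_visit_time` is the LRUHandle field declared further down):

```cpp
#include <cstdint>

#include "olap/lru_cache.h" // doris::CachePrunePredicate, doris::LRUHandle

// Build a predicate that selects entries idle for longer than a threshold.
// Keep it cheap: prune_if() evaluates it while holding the shard lock.
doris::CachePrunePredicate make_idle_predicate(int64_t now, int64_t idle_threshold) {
    return [now, idle_threshold](const doris::LRUHandle* handle) {
        return now - handle->last_visit_time > idle_threshold;
    };
}

// Usage sketch:
//   doris::PrunedInfo info = cache->prune_if(make_idle_predicate(now, threshold));
//   // info.pruned_count entries and info.pruned_size bytes were evicted.
```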
| 140 |  |  | 
| 141 |  | class Cache { | 
| 142 |  | public: | 
| 143 | 895 |     Cache() = default; | 
| 144 |  |  | 
| 145 |  |     // Destroys all remaining entries, releasing their values | 
| 146 |  |     // (see LRUHandle::free()). | 
| 147 | 887 |     virtual ~Cache() = default; | 
| 148 |  |  | 
| 149 |  |     // Opaque handle to an entry stored in the cache. | 
| 150 |  |     struct Handle {}; | 
| 151 |  |  | 
| 152 |  |     // Insert a mapping from key->value into the cache and assign it | 
| 153 |  |     // the specified charge against the total cache capacity. | 
| 154 |  |     // | 
| 155 |  |     // Returns a handle that corresponds to the mapping.  The caller | 
| 156 |  |     // must call this->release(handle) when the returned mapping is no | 
| 157 |  |     // longer needed. | 
| 158 |  |     // | 
| 159 |  |     // When the inserted entry is no longer needed, its value is | 
| 160 |  |     // deleted as an LRUCacheValueBase (see LRUHandle::free()). | 
| 161 |  |     // | 
| 162 |  |     // If the cache uses LRU-K and is full, the first insert of a key will not succeed. | 
| 163 |  |     // | 
| 164 |  |     // Note: in a ShardedLRUCache, each shard's capacity = ShardedLRUCache capacity / num_shards. | 
| 165 |  |     virtual Handle* insert(const CacheKey& key, void* value, size_t charge, | 
| 166 |  |                            CachePriority priority = CachePriority::NORMAL) = 0; | 
| 167 |  |  | 
| 168 |  |     // If the cache has no mapping for "key", returns nullptr. | 
| 169 |  |     // | 
| 170 |  |     // Else return a handle that corresponds to the mapping.  The caller | 
| 171 |  |     // must call this->release(handle) when the returned mapping is no | 
| 172 |  |     // longer needed. | 
| 173 |  |     virtual Handle* lookup(const CacheKey& key) = 0; | 
| 174 |  |  | 
| 175 |  |     // Release a mapping returned by a previous Lookup(). | 
| 176 |  |     // REQUIRES: handle must not have been released yet. | 
| 177 |  |     // REQUIRES: handle must have been returned by a method on *this. | 
| 178 |  |     virtual void release(Handle* handle) = 0; | 
| 179 |  |  | 
| 180 |  |     // Return the value encapsulated in a handle returned by a | 
| 181 |  |     // successful lookup(). | 
| 182 |  |     // REQUIRES: handle must not have been released yet. | 
| 183 |  |     // REQUIRES: handle must have been returned by a method on *this. | 
| 184 |  |     virtual void* value(Handle* handle) = 0; | 
| 185 |  |  | 
| 186 |  |     // If the cache contains entry for key, erase it.  Note that the | 
| 187 |  |     // underlying entry will be kept around until all existing handles | 
| 188 |  |     // to it have been released. | 
| 189 |  |     virtual void erase(const CacheKey& key) = 0; | 
| 190 |  |  | 
| 191 |  |     // Return a new numeric id.  May be used by multiple clients who are | 
| 192 |  |     // sharing the same cache to partition the key space.  Typically the | 
| 193 |  |     // client will allocate a new id at startup and prepend the id to | 
| 194 |  |     // its cache keys. | 
| 195 |  |     virtual uint64_t new_id() = 0; | 
| 196 |  |  | 
| 197 |  |     // Remove all cache entries that are not actively in use.  Memory-constrained | 
| 198 |  |     // applications may wish to call this method to reduce memory usage. | 
| 199 |  |     // Default implementation of Prune() does nothing.  Subclasses are strongly | 
| 200 |  |     // encouraged to override the default implementation.  A future release of | 
| 201 |  |     // leveldb may change prune() to a pure abstract method. | 
| 202 |  |     // Returns the count and total size of the pruned entries. | 
| 203 | 0 |     virtual PrunedInfo prune() { return {0, 0}; } | 
| 204 |  |  | 
| 205 |  |     // Same as prune(), but an entry is only pruned if the predicate matches. | 
| 206 |  |     // NOTICE: the predicate should be cheap; otherwise prune_if() may hold the | 
| 207 |  |     // lock for a long time while evaluating it. | 
| 208 | 0 |     virtual PrunedInfo prune_if(CachePrunePredicate pred, bool lazy_mode = false) { return {0, 0}; } | 
| 209 |  |  | 
| 210 |  |     virtual void for_each_entry(const std::function<void(const LRUHandle*)>& visitor) = 0; | 
| 211 |  |  | 
| 212 |  |     virtual int64_t get_usage() = 0; | 
| 213 |  |  | 
| 214 |  |     virtual PrunedInfo set_capacity(size_t capacity) = 0; | 
| 215 |  |     virtual size_t get_capacity() = 0; | 
| 216 |  |  | 
| 217 |  |     virtual size_t get_element_count() = 0; | 
| 218 |  |  | 
| 219 |  | private: | 
| 220 |  |     DISALLOW_COPY_AND_ASSIGN(Cache); | 
| 221 |  | }; | 
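
A typical handle lifecycle against this interface might look like the sketch below. `MyValue`, `byte_size()`, and `consume()` are hypothetical; in Doris the concrete cache is created through LRUCachePolicy and values derive from LRUCacheValueBase:

```cpp
#include <cstddef>

#include "olap/lru_cache.h"
#include "runtime/memory/lru_cache_value_base.h"

// Hypothetical cached value type, assumed to be default-constructible.
struct MyValue : doris::LRUCacheValueBase {
    size_t byte_size() const { return 4096; } // illustrative charge
};

void consume(MyValue*) {} // hypothetical consumer

void cache_usage(doris::Cache* cache, const doris::CacheKey& key) {
    // A null handle from lookup() means a miss.
    doris::Cache::Handle* handle = cache->lookup(key);
    if (handle == nullptr) {
        auto* value = new MyValue();
        // charge: bytes for a SIZE cache, entry weight (often 1) for a NUMBER cache.
        handle = cache->insert(key, value, value->byte_size());
    }

    consume(static_cast<MyValue*>(cache->value(handle)));

    // Every handle returned by lookup()/insert() must be released exactly once.
    cache->release(handle);
}
```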
| 222 |  |  | 
| 223 |  | // An entry is a variable length heap-allocated structure.  Entries | 
| 224 |  | // are kept in a circular doubly linked list ordered by access time. | 
| 225 |  | // Note: member variables must be POD types or raw pointers, not class objects | 
| 226 |  | // or smart pointers, because LRUHandle is allocated with malloc (no constructors run). | 
| 227 |  | struct LRUHandle { | 
| 228 |  |     void* value = nullptr; | 
| 229 |  |     struct LRUHandle* next_hash = nullptr; // next entry in hash table | 
| 230 |  |     struct LRUHandle* next = nullptr;      // next entry in lru list | 
| 231 |  |     struct LRUHandle* prev = nullptr;      // previous entry in lru list | 
| 232 |  |     size_t charge; | 
| 233 |  |     size_t key_length; | 
| 234 |  |     size_t total_size; // Entry charge used to limit cache capacity; for LRUCacheType::SIZE it also includes the key length. | 
| 235 |  |     bool in_cache; // Whether entry is in the cache. | 
| 236 |  |     uint32_t refs; | 
| 237 |  |     uint32_t hash; // Hash of key(); used for fast sharding and comparisons | 
| 238 |  |     CachePriority priority = CachePriority::NORMAL; | 
| 239 |  |     LRUCacheType type; | 
| 240 |  |     int64_t last_visit_time; // Save the last visit time of this cache entry. | 
| 241 |  |     char key_data[1];        // Beginning of key | 
| 242 |  |     // Note! key_data must be at the end. | 
| 243 |  |  | 
| 244 | 647k |     CacheKey key() const { | 
| 245 |  |         // For cheaper lookups, we allow a temporary Handle object | 
| 246 |  |         // to store a pointer to a key in "value". | 
| 247 | 647k |         if (next == this) { | 
| 248 | 0 |             return *(reinterpret_cast<CacheKey*>(value)); | 
| 249 | 647k |         } else { | 
| 250 | 647k |             return {key_data, key_length}; | 
| 251 | 647k |         } | 
| 252 | 647k |     } | 
| 253 |  |  | 
| 254 | 317k |     void free() { | 
| 255 | 317k |         if (value != nullptr) { // value allows null pointer. | 
| 256 | 317k |             delete (LRUCacheValueBase*)value; | 
| 257 | 317k |         } | 
| 258 | 317k |         ::free(this); | 
| 259 | 317k |     } | 
| 260 |  | }; | 
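
Because `key_data` is a trailing one-byte array and the struct is malloc-allocated, the allocation presumably follows the classic LevelDB layout. A sketch under that assumption (the real logic lives in lru_cache.cpp; since malloc runs no constructors, every field used must be set explicitly):

```cpp
#include <cstdlib>
#include <cstring>

#include "olap/lru_cache.h" // doris::LRUHandle, doris::CacheKey

doris::LRUHandle* alloc_handle_sketch(const doris::CacheKey& key, void* value, size_t charge) {
    // The struct already contains key_data[1], so only key.size() - 1 extra bytes are needed.
    auto* e = reinterpret_cast<doris::LRUHandle*>(
            malloc(sizeof(doris::LRUHandle) - 1 + key.size()));
    e->value = value;
    e->charge = charge;
    e->key_length = key.size();
    e->in_cache = false;
    e->refs = 1; // the caller's reference
    memcpy(e->key_data, key.data(), key.size());
    return e; // later released through LRUHandle::free(), which also deletes the value
}
```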
| 261 |  |  | 
| 262 |  | // We provide our own simple hash table since it removes a whole bunch | 
| 263 |  | // of porting hacks and is also faster than some of the built-in hash | 
| 264 |  | // table implementations in some of the compiler/runtime combinations | 
| 265 |  | // we have tested.  E.g., readrandom speeds up by ~5% over the g++ | 
| 266 |  | // 4.4.3's builtin hashtable. | 
| 267 |  |  | 
| 268 |  | class HandleTable { | 
| 269 |  | public: | 
| 270 | 12.2k |     HandleTable() { _resize(); } | 
| 271 |  |  | 
| 272 |  |     ~HandleTable(); | 
| 273 |  |  | 
| 274 |  |     LRUHandle* lookup(const CacheKey& key, uint32_t hash); | 
| 275 |  |  | 
| 276 |  |     LRUHandle* insert(LRUHandle* h); | 
| 277 |  |  | 
| 278 |  |     // Remove element from hash table by "key" and "hash". | 
| 279 |  |     LRUHandle* remove(const CacheKey& key, uint32_t hash); | 
| 280 |  |  | 
| 281 |  |     // Remove element from hash table by "h", it would be faster | 
| 282 |  |     // than the function above. | 
| 283 |  |     // Return whether h is found and removed. | 
| 284 |  |     bool remove(const LRUHandle* h); | 
| 285 |  |  | 
| 286 |  |     uint32_t element_count() const; | 
| 287 |  |  | 
| 288 |  | private: | 
| 289 |  |     FRIEND_TEST(CacheTest, HandleTableTest); | 
| 290 |  |  | 
| 291 |  |     // The table consists of an array of buckets where each bucket is | 
| 292 |  |     // a linked list of cache entries that hash into the bucket. | 
| 293 |  |     uint32_t _length {}; | 
| 294 |  |     uint32_t _elems {}; | 
| 295 |  |     LRUHandle** _list = nullptr; | 
| 296 |  |  | 
| 297 |  |     // Return a pointer to slot that points to a cache entry that | 
| 298 |  |     // matches key/hash.  If there is no such cache entry, return a | 
| 299 |  |     // pointer to the trailing slot in the corresponding linked list. | 
| 300 |  |     LRUHandle** _find_pointer(const CacheKey& key, uint32_t hash); | 
| 301 |  |  | 
| 302 |  |     void _resize(); | 
| 303 |  | }; | 
| 304 |  |  | 
| 305 |  | // The pair's first element is a timestamp; <timestamp, LRUHandle*> pairs are kept | 
| 306 |  | // in ascending order. When space must be freed, evict from the beginning of the set, | 
| 307 |  | // because the first element's timestamp is the oldest. | 
| 308 |  | using LRUHandleSortedSet = std::set<std::pair<int64_t, LRUHandle*>>; | 
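
A minimal illustration of the eviction order this alias provides: std::set orders pairs lexicographically, so begin() always holds the oldest timestamp. Plain ints stand in for LRUHandle* here:

```cpp
#include <cstdint>
#include <set>
#include <utility>

void sorted_set_eviction_order() {
    std::set<std::pair<int64_t, int>> entries; // int stands in for LRUHandle*
    entries.emplace(300, 1);
    entries.emplace(100, 2); // oldest timestamp
    entries.emplace(200, 3);

    // Freeing space starts at begin(): the pair with timestamp 100 is evicted first.
    while (!entries.empty()) {
        entries.erase(entries.begin());
    }
}
```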
| 309 |  |  | 
| 310 |  | // A single shard of sharded cache. | 
| 311 |  | class LRUCache { | 
| 312 |  | public: | 
| 313 |  |     LRUCache(LRUCacheType type, bool is_lru_k = DEFAULT_LRU_CACHE_IS_LRU_K); | 
| 314 |  |     ~LRUCache(); | 
| 315 |  |  | 
| 316 |  |     // visits_lru_cache_key is the hash value of a CacheKey. | 
| 317 |  |     // If two keys collide on this hash, one cache entry may be admitted earlier | 
| 318 |  |     // and another entry with the same key hash admitted later than expected, | 
| 319 |  |     // but this does not affect the correctness of the cache. | 
| 320 |  |     using visits_lru_cache_key = uint32_t; | 
| 321 |  |     using visits_lru_cache_pair = std::pair<visits_lru_cache_key, size_t>; | 
| 322 |  |  | 
| 323 |  |     // Separate from constructor so caller can easily make an array of LRUCache | 
| 324 |  |     PrunedInfo set_capacity(size_t capacity); | 
| 325 | 12.2k |     void set_element_count_capacity(uint32_t element_count_capacity) { | 
| 326 | 12.2k |         _element_count_capacity = element_count_capacity; | 
| 327 | 12.2k |     } | 
| 328 |  |  | 
| 329 |  |     // Like Cache methods, but with an extra "hash" parameter. | 
| 330 |  |     // Must call release on the returned handle pointer. | 
| 331 |  |     Cache::Handle* insert(const CacheKey& key, uint32_t hash, void* value, size_t charge, | 
| 332 |  |                           CachePriority priority = CachePriority::NORMAL); | 
| 333 |  |     Cache::Handle* lookup(const CacheKey& key, uint32_t hash); | 
| 334 |  |     void release(Cache::Handle* handle); | 
| 335 |  |     void erase(const CacheKey& key, uint32_t hash); | 
| 336 |  |     PrunedInfo prune(); | 
| 337 |  |     PrunedInfo prune_if(CachePrunePredicate pred, bool lazy_mode = false); | 
| 338 |  |     void for_each_entry(const std::function<void(const LRUHandle*)>& visitor); | 
| 339 |  |  | 
| 340 |  |     void set_cache_value_time_extractor(CacheValueTimeExtractor cache_value_time_extractor); | 
| 341 |  |     void set_cache_value_check_timestamp(bool cache_value_check_timestamp); | 
| 342 |  |  | 
| 343 |  |     uint64_t get_lookup_count(); | 
| 344 |  |     uint64_t get_hit_count(); | 
| 345 |  |     uint64_t get_miss_count(); | 
| 346 |  |     uint64_t get_stampede_count(); | 
| 347 |  |  | 
| 348 |  |     size_t get_usage(); | 
| 349 |  |     size_t get_capacity(); | 
| 350 |  |     size_t get_element_count(); | 
| 351 |  |  | 
| 352 |  | private: | 
| 353 |  |     void _lru_remove(LRUHandle* e); | 
| 354 |  |     void _lru_append(LRUHandle* list, LRUHandle* e); | 
| 355 |  |     bool _unref(LRUHandle* e); | 
| 356 |  |     void _evict_from_lru(size_t total_size, LRUHandle** to_remove_head); | 
| 357 |  |     void _evict_from_lru_with_time(size_t total_size, LRUHandle** to_remove_head); | 
| 358 |  |     void _evict_one_entry(LRUHandle* e); | 
| 359 |  |     bool _check_element_count_limit(); | 
| 360 |  |     bool _lru_k_insert_visits_list(size_t total_size, visits_lru_cache_key visits_key); | 
| 361 |  |  | 
| 362 |  | private: | 
| 363 |  |     LRUCacheType _type; | 
| 364 |  |  | 
| 365 |  |     // Initialized before use. | 
| 366 |  |     size_t _capacity = 0; | 
| 367 |  |  | 
| 368 |  |     // _mutex protects the following state. | 
| 369 |  |     std::mutex _mutex; | 
| 370 |  |     size_t _usage = 0; | 
| 371 |  |  | 
| 372 |  |     // Dummy head of LRU list. | 
| 373 |  |     // Entries have refs==1 and in_cache==true. | 
| 374 |  |     // _lru_normal.prev is newest entry, _lru_normal.next is oldest entry. | 
| 375 |  |     LRUHandle _lru_normal; | 
| 376 |  |     // _lru_durable.prev is newest entry, _lru_durable.next is oldest entry. | 
| 377 |  |     LRUHandle _lru_durable; | 
| 378 |  |  | 
| 379 |  |     HandleTable _table; | 
| 380 |  |  | 
| 381 |  |     uint64_t _lookup_count = 0; // number of cache lookups | 
| 382 |  |     uint64_t _hit_count = 0;    // number of cache hits | 
| 383 |  |     uint64_t _miss_count = 0;   // number of cache misses | 
| 384 |  |     uint64_t _stampede_count = 0; | 
| 385 |  |  | 
| 386 |  |     CacheValueTimeExtractor _cache_value_time_extractor; | 
| 387 |  |     bool _cache_value_check_timestamp = false; | 
| 388 |  |     LRUHandleSortedSet _sorted_normal_entries_with_timestamp; | 
| 389 |  |     LRUHandleSortedSet _sorted_durable_entries_with_timestamp; | 
| 390 |  |  | 
| 391 |  |     uint32_t _element_count_capacity = 0; | 
| 392 |  |  | 
| 393 |  |     bool _is_lru_k = false; // LRU-K algorithm, K=2 | 
| 394 |  |     std::list<visits_lru_cache_pair> _visits_lru_cache_list; | 
| 395 |  |     std::unordered_map<visits_lru_cache_key, std::list<visits_lru_cache_pair>::iterator> | 
| 396 |  |             _visits_lru_cache_map; | 
| 397 |  |     size_t _visits_lru_cache_usage = 0; | 
| 398 |  | }; | 
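
The `_visits_lru_cache_*` members above suggest the LRU-K (K=2) admission policy described on Cache::insert: when the shard is full, a key's first appearance only records a visit keyed by its hash, and the entry is admitted on its second appearance. The sketch below illustrates that idea only; it is not the Doris implementation:

```cpp
#include <cstddef>
#include <cstdint>
#include <list>
#include <unordered_map>
#include <utility>

class LruKAdmissionSketch {
public:
    // Returns true if the entry should be inserted into the main cache now.
    bool should_admit(uint32_t key_hash, size_t entry_size, bool cache_is_full) {
        if (!cache_is_full) {
            return true; // plenty of room: admit immediately
        }
        auto it = _visits_map.find(key_hash);
        if (it != _visits_map.end()) {
            // Second visit: forget the pending visit and admit the entry.
            _visits_list.erase(it->second);
            _visits_map.erase(it);
            return true;
        }
        // First visit while full: remember it and reject this insert.
        _visits_list.emplace_front(key_hash, entry_size);
        _visits_map[key_hash] = _visits_list.begin();
        return false;
    }

private:
    std::list<std::pair<uint32_t, size_t>> _visits_list;
    std::unordered_map<uint32_t, std::list<std::pair<uint32_t, size_t>>::iterator> _visits_map;
};
```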
| 399 |  |  | 
| 400 |  | class ShardedLRUCache : public Cache { | 
| 401 |  | public: | 
| 402 |  |     ~ShardedLRUCache() override; | 
| 403 |  |     Handle* insert(const CacheKey& key, void* value, size_t charge, | 
| 404 |  |                    CachePriority priority = CachePriority::NORMAL) override; | 
| 405 |  |     Handle* lookup(const CacheKey& key) override; | 
| 406 |  |     void release(Handle* handle) override; | 
| 407 |  |     void erase(const CacheKey& key) override; | 
| 408 |  |     void* value(Handle* handle) override; | 
| 409 |  |     uint64_t new_id() override; | 
| 410 |  |     PrunedInfo prune() override; | 
| 411 |  |     PrunedInfo prune_if(CachePrunePredicate pred, bool lazy_mode = false) override; | 
| 412 |  |     void for_each_entry(const std::function<void(const LRUHandle*)>& visitor) override; | 
| 413 |  |     int64_t get_usage() override; | 
| 414 |  |     size_t get_element_count() override; | 
| 415 |  |     PrunedInfo set_capacity(size_t capacity) override; | 
| 416 |  |     size_t get_capacity() override; | 
| 417 |  |  | 
| 418 |  | private: | 
| 419 |  |     // LRUCache can only be created and managed with LRUCachePolicy. | 
| 420 |  |     friend class LRUCachePolicy; | 
| 421 |  |  | 
| 422 |  |     explicit ShardedLRUCache(const std::string& name, size_t capacity, LRUCacheType type, | 
| 423 |  |                              uint32_t num_shards, uint32_t element_count_capacity, bool is_lru_k); | 
| 424 |  |     explicit ShardedLRUCache(const std::string& name, size_t capacity, LRUCacheType type, | 
| 425 |  |                              uint32_t num_shards, | 
| 426 |  |                              CacheValueTimeExtractor cache_value_time_extractor, | 
| 427 |  |                              bool cache_value_check_timestamp, uint32_t element_count_capacity, | 
| 428 |  |                              bool is_lru_k); | 
| 429 |  |  | 
| 430 |  |     void update_cache_metrics() const; | 
| 431 |  |  | 
| 432 |  | private: | 
| 433 |  |     static uint32_t _hash_slice(const CacheKey& s); | 
| 434 | 1.31M |     uint32_t _shard(uint32_t hash) const { | 
| 435 | 1.31M |         return _num_shard_bits > 0 ? (hash >> (32 - _num_shard_bits)) : 0; | 
| 436 | 1.31M |     } | 
| 437 |  |  | 
| 438 |  |     std::string _name; | 
| 439 |  |     const int _num_shard_bits; | 
| 440 |  |     const uint32_t _num_shards; | 
| 441 |  |     LRUCache** _shards = nullptr; | 
| 442 |  |     std::atomic<uint64_t> _last_id; | 
| 443 |  |     std::mutex _mutex; | 
| 444 |  |     size_t _capacity {0}; | 
| 445 |  |  | 
| 446 |  |     std::shared_ptr<MetricEntity> _entity; | 
| 447 |  |     IntGauge* cache_capacity = nullptr; | 
| 448 |  |     IntGauge* cache_usage = nullptr; | 
| 449 |  |     IntGauge* cache_element_count = nullptr; | 
| 450 |  |     DoubleGauge* cache_usage_ratio = nullptr; | 
| 451 |  |     IntCounter* cache_lookup_count = nullptr; | 
| 452 |  |     IntCounter* cache_hit_count = nullptr; | 
| 453 |  |     IntCounter* cache_miss_count = nullptr; | 
| 454 |  |     IntCounter* cache_stampede_count = nullptr; | 
| 455 |  |     DoubleGauge* cache_hit_ratio = nullptr; | 
| 456 |  |     // bvars | 
| 457 |  |     std::unique_ptr<bvar::Adder<uint64_t>> _hit_count_bvar; | 
| 458 |  |     std::unique_ptr<bvar::PerSecond<bvar::Adder<uint64_t>>> _hit_count_per_second; | 
| 459 |  |     std::unique_ptr<bvar::Adder<uint64_t>> _lookup_count_bvar; | 
| 460 |  |     std::unique_ptr<bvar::PerSecond<bvar::Adder<uint64_t>>> _lookup_count_per_second; | 
| 461 |  | }; | 
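
The shard for a key is taken from the top `_num_shard_bits` bits of its hash (see `_shard()` above), and each shard receives capacity / num_shards, per the note on Cache::insert. A standalone rendering of that arithmetic, assuming the default 32 shards corresponds to 5 shard bits:

```cpp
#include <cassert>
#include <cstdint>

// Mirrors ShardedLRUCache::_shard(): use the high bits of the hash as the shard index.
uint32_t pick_shard(uint32_t hash, int num_shard_bits) {
    return num_shard_bits > 0 ? (hash >> (32 - num_shard_bits)) : 0;
}

int main() {
    constexpr int kNumShardBits = 5;               // 2^5 = 32 shards (the default count)
    assert(pick_shard(0xF0000001u, kNumShardBits) == 30);
    assert(pick_shard(0x00000001u, kNumShardBits) == 0);
    return 0;
}
```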
| 462 |  |  | 
| 463 |  | // API-compatible with ShardedLRUCache, but does not actually cache anything. | 
| 464 |  | class DummyLRUCache : public Cache { | 
| 465 |  | public: | 
| 466 |  |     // Must call release on the returned handle pointer. | 
| 467 |  |     Handle* insert(const CacheKey& key, void* value, size_t charge, | 
| 468 |  |                    CachePriority priority = CachePriority::NORMAL) override; | 
| 469 | 181 |     Handle* lookup(const CacheKey& key) override { return nullptr; }; | 
| 470 |  |     void release(Handle* handle) override; | 
| 471 | 146 |     void erase(const CacheKey& key) override {}; | 
| 472 |  |     void* value(Handle* handle) override; | 
| 473 | 0 |     uint64_t new_id() override { return 0; }; | 
| 474 | 0 |     PrunedInfo prune() override { return {0, 0}; }; | 
| 475 | 0 |     PrunedInfo prune_if(CachePrunePredicate pred, bool lazy_mode = false) override { | 
| 476 | 0 |         return {0, 0}; | 
| 477 | 0 |     }; | 
| 478 | 0 |     void for_each_entry(const std::function<void(const LRUHandle*)>& visitor) override {} | 
| 479 | 0 |     int64_t get_usage() override { return 0; }; | 
| 480 | 0 |     PrunedInfo set_capacity(size_t capacity) override { return {0, 0}; }; | 
| 481 | 0 |     size_t get_capacity() override { return 0; }; | 
| 482 | 0 |     size_t get_element_count() override { return 0; }; | 
| 483 |  | }; | 
| 484 |  |  | 
| 485 |  | } // namespace doris | 
| 486 |  | #include "common/compile_check_end.h" |