/root/doris/be/src/olap/lru_cache.cpp
| Line | Count | Source | 
| 1 |  | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. | 
| 2 |  | // Use of this source code is governed by a BSD-style license that can be | 
| 3 |  | // found in the LICENSE file. See the AUTHORS file for names of contributors. | 
| 4 |  |  | 
| 5 |  | #include "olap/lru_cache.h" | 
| 6 |  |  | 
| 7 |  | #include <cstdlib> | 
| 8 |  | #include <mutex> | 
| 9 |  | #include <new> | 
| 10 |  | #include <sstream> | 
| 11 |  | #include <string> | 
| 12 |  |  | 
| 13 |  | #include "util/metrics.h" | 
| 14 |  | #include "util/time.h" | 
| 15 |  |  | 
| 16 |  | using std::string; | 
| 17 |  | using std::stringstream; | 
| 18 |  |  | 
| 19 |  | namespace doris { | 
| 20 |  | #include "common/compile_check_begin.h" | 
| 21 |  |  | 
// Metric prototypes for per-cache observability (capacity/usage in bytes,
// element counts, and hit/miss/lookup/stampede counters). These are
// registered per ShardedLRUCache instance via util/metrics.h.
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(cache_capacity, MetricUnit::BYTES);
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(cache_usage, MetricUnit::BYTES);
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(cache_element_count, MetricUnit::NOUNIT);
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(cache_usage_ratio, MetricUnit::NOUNIT);
DEFINE_COUNTER_METRIC_PROTOTYPE_2ARG(cache_lookup_count, MetricUnit::OPERATIONS);
DEFINE_COUNTER_METRIC_PROTOTYPE_2ARG(cache_hit_count, MetricUnit::OPERATIONS);
DEFINE_COUNTER_METRIC_PROTOTYPE_2ARG(cache_miss_count, MetricUnit::OPERATIONS);
DEFINE_COUNTER_METRIC_PROTOTYPE_2ARG(cache_stampede_count, MetricUnit::OPERATIONS);
DEFINE_GAUGE_METRIC_PROTOTYPE_2ARG(cache_hit_ratio, MetricUnit::NOUNIT);
| 31 |  |  | 
// Murmur-style hash over `n` bytes starting at `data`, mixed with `seed`.
// Inherited from LevelDB; processes the input 4 bytes at a time and then
// folds in the 1-3 remaining tail bytes via deliberate switch fall-through.
// Deterministic for a given (data, n, seed) triple.
uint32_t CacheKey::hash(const char* data, size_t n, uint32_t seed) const {
    // Similar to murmur hash
    const uint32_t m = 0xc6a4a793;
    const uint32_t r = 24;
    const char* limit = data + n;
    uint32_t h = seed ^ (static_cast<uint32_t>(n) * m);

    // Pick up four bytes at a time
    while (data + 4 <= limit) {
        uint32_t w = _decode_fixed32(data);
        data += 4;
        h += w;
        h *= m;
        h ^= (h >> 16);
    }

    // Pick up remaining bytes; the cases intentionally cascade so that
    // case 3 also executes cases 2 and 1.
    switch (limit - data) {
    case 3:
        h += static_cast<unsigned char>(data[2]) << 16;

        // fall through
    case 2:
        h += static_cast<unsigned char>(data[1]) << 8;

        // fall through
    case 1:
        h += static_cast<unsigned char>(data[0]);
        h *= m;
        h ^= (h >> r);
        break;

    default:
        break;
    }

    return h;
}
| 70 |  |  | 
HandleTable::~HandleTable() {
    // Only the bucket array is owned by the table; the LRUHandle entries
    // themselves are owned and freed by the LRUCache.
    delete[] _list;
}
| 74 |  |  | 
| 75 |  | // LRU cache implementation | 
| 76 | 352k | LRUHandle* HandleTable::lookup(const CacheKey& key, uint32_t hash) { | 
| 77 | 352k |     return *_find_pointer(key, hash); | 
| 78 | 352k | } | 
| 79 |  |  | 
| 80 | 327k | LRUHandle* HandleTable::insert(LRUHandle* h) { | 
| 81 | 327k |     LRUHandle** ptr = _find_pointer(h->key(), h->hash); | 
| 82 | 327k |     LRUHandle* old = *ptr; | 
| 83 | 327k |     h->next_hash = old ? old->next_hash : nullptr; | 
| 84 | 327k |     *ptr = h; | 
| 85 |  |  | 
| 86 | 327k |     if (old == nullptr) { | 
| 87 | 311k |         ++_elems; | 
| 88 | 311k |         if (_elems > _length) { | 
| 89 |  |             // Since each cache entry is fairly large, we aim for a small | 
| 90 |  |             // average linked list length (<= 1). | 
| 91 | 194 |             _resize(); | 
| 92 | 194 |         } | 
| 93 | 311k |     } | 
| 94 |  |  | 
| 95 | 327k |     return old; | 
| 96 | 327k | } | 
| 97 |  |  | 
| 98 | 18 | LRUHandle* HandleTable::remove(const CacheKey& key, uint32_t hash) { | 
| 99 | 18 |     LRUHandle** ptr = _find_pointer(key, hash); | 
| 100 | 18 |     LRUHandle* result = *ptr; | 
| 101 |  |  | 
| 102 | 18 |     if (result != nullptr) { | 
| 103 | 7 |         *ptr = result->next_hash; | 
| 104 | 7 |         _elems--; | 
| 105 | 7 |     } | 
| 106 |  |  | 
| 107 | 18 |     return result; | 
| 108 | 18 | } | 
| 109 |  |  | 
| 110 | 301k | bool HandleTable::remove(const LRUHandle* h) { | 
| 111 | 301k |     LRUHandle** ptr = &(_list[h->hash & (_length - 1)]); | 
| 112 | 313k |     while (*ptr != nullptr && *ptr != h) { | 
| 113 | 11.9k |         ptr = &(*ptr)->next_hash; | 
| 114 | 11.9k |     } | 
| 115 |  |  | 
| 116 | 301k |     LRUHandle* result = *ptr; | 
| 117 | 301k |     if (result != nullptr) { | 
| 118 | 301k |         *ptr = result->next_hash; | 
| 119 | 301k |         _elems--; | 
| 120 | 301k |         return true; | 
| 121 | 301k |     } | 
| 122 | 0 |     return false; | 
| 123 | 301k | } | 
| 124 |  |  | 
| 125 | 680k | LRUHandle** HandleTable::_find_pointer(const CacheKey& key, uint32_t hash) { | 
| 126 | 680k |     LRUHandle** ptr = &(_list[hash & (_length - 1)]); | 
| 127 | 1.09M |     while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) { | 
| 128 | 415k |         ptr = &(*ptr)->next_hash; | 
| 129 | 415k |     } | 
| 130 |  |  | 
| 131 | 680k |     return ptr; | 
| 132 | 680k | } | 
| 133 |  |  | 
| 134 | 12.4k | void HandleTable::_resize() { | 
| 135 | 12.4k |     uint32_t new_length = 16; | 
| 136 | 13.0k |     while (new_length < _elems * 1.5) { | 
| 137 | 598 |         new_length *= 2; | 
| 138 | 598 |     } | 
| 139 |  |  | 
| 140 | 12.4k |     auto** new_list = new (std::nothrow) LRUHandle*[new_length]; | 
| 141 | 12.4k |     memset(new_list, 0, sizeof(new_list[0]) * new_length); | 
| 142 |  |  | 
| 143 | 12.4k |     uint32_t count = 0; | 
| 144 | 96.3k |     for (uint32_t i = 0; i < _length; i++) { | 
| 145 | 83.8k |         LRUHandle* h = _list[i]; | 
| 146 | 167k |         while (h != nullptr) { | 
| 147 | 84.0k |             LRUHandle* next = h->next_hash; | 
| 148 | 84.0k |             uint32_t hash = h->hash; | 
| 149 | 84.0k |             LRUHandle** ptr = &new_list[hash & (new_length - 1)]; | 
| 150 | 84.0k |             h->next_hash = *ptr; | 
| 151 | 84.0k |             *ptr = h; | 
| 152 | 84.0k |             h = next; | 
| 153 | 84.0k |             count++; | 
| 154 | 84.0k |         } | 
| 155 | 83.8k |     } | 
| 156 |  |  | 
| 157 | 12.4k |     DCHECK_EQ(_elems, count); | 
| 158 | 12.4k |     delete[] _list; | 
| 159 | 12.4k |     _list = new_list; | 
| 160 | 12.4k |     _length = new_length; | 
| 161 | 12.4k | } | 
| 162 |  |  | 
// Number of entries currently stored in the table. Not synchronized here;
// callers (LRUCache) hold the shard mutex.
uint32_t HandleTable::element_count() const {
    return _elems;
}
| 166 |  |  | 
// A single cache shard. `type` decides whether `charge` is interpreted as
// bytes (SIZE) or as an abstract weight (NUMBER); `is_lru_k` enables the
// LRU-K admission policy (a key must be seen twice, once the cache is full,
// before it is admitted).
LRUCache::LRUCache(LRUCacheType type, bool is_lru_k) : _type(type), _is_lru_k(is_lru_k) {
    // Make empty circular linked list; _lru_normal/_lru_durable are sentinel
    // nodes, so an empty list is one whose sentinel points at itself.
    _lru_normal.next = &_lru_normal;
    _lru_normal.prev = &_lru_normal;
    _lru_durable.next = &_lru_durable;
    _lru_durable.prev = &_lru_durable;
}

LRUCache::~LRUCache() {
    // Evict and free every idle entry. Entries still referenced by callers
    // are not freed here; see release().
    prune();
}
| 178 |  |  | 
| 179 | 12.3k | PrunedInfo LRUCache::set_capacity(size_t capacity) { | 
| 180 | 12.3k |     LRUHandle* last_ref_list = nullptr; | 
| 181 | 12.3k |     { | 
| 182 | 12.3k |         std::lock_guard l(_mutex); | 
| 183 | 12.3k |         if (capacity > _capacity) { | 
| 184 | 12.2k |             _capacity = capacity; | 
| 185 | 12.2k |             return {0, 0}; | 
| 186 | 12.2k |         } | 
| 187 | 8 |         _capacity = capacity; | 
| 188 | 8 |         _evict_from_lru(0, &last_ref_list); | 
| 189 | 8 |     } | 
| 190 |  |  | 
| 191 | 0 |     int64_t pruned_count = 0; | 
| 192 | 8 |     int64_t pruned_size = 0; | 
| 193 | 56.0k |     while (last_ref_list != nullptr) { | 
| 194 | 55.9k |         ++pruned_count; | 
| 195 | 55.9k |         pruned_size += last_ref_list->total_size; | 
| 196 | 55.9k |         LRUHandle* next = last_ref_list->next; | 
| 197 | 55.9k |         last_ref_list->free(); | 
| 198 | 55.9k |         last_ref_list = next; | 
| 199 | 55.9k |     } | 
| 200 | 8 |     return {pruned_count, pruned_size}; | 
| 201 | 12.3k | } | 
| 202 |  |  | 
// Thread-safe snapshot of the number of lookup() calls.
uint64_t LRUCache::get_lookup_count() {
    std::lock_guard l(_mutex);
    return _lookup_count;
}

// Thread-safe snapshot of the number of successful lookups.
uint64_t LRUCache::get_hit_count() {
    std::lock_guard l(_mutex);
    return _hit_count;
}

// Thread-safe snapshot of the number of inserts that displaced an existing
// entry with the same key (concurrent loads of one key).
uint64_t LRUCache::get_stampede_count() {
    std::lock_guard l(_mutex);
    return _stampede_count;
}

// Thread-safe snapshot of the number of failed lookups.
uint64_t LRUCache::get_miss_count() {
    std::lock_guard l(_mutex);
    return _miss_count;
}

// Thread-safe snapshot of the current charged usage (bytes or weight,
// depending on LRUCacheType).
size_t LRUCache::get_usage() {
    std::lock_guard l(_mutex);
    return _usage;
}

// Thread-safe snapshot of the shard capacity.
size_t LRUCache::get_capacity() {
    std::lock_guard l(_mutex);
    return _capacity;
}

// Thread-safe snapshot of the number of entries in the hash table.
size_t LRUCache::get_element_count() {
    std::lock_guard l(_mutex);
    return _table.element_count();
}
| 237 |  |  | 
| 238 | 948k | bool LRUCache::_unref(LRUHandle* e) { | 
| 239 | 948k |     DCHECK(e->refs > 0); | 
| 240 | 948k |     e->refs--; | 
| 241 | 948k |     return e->refs == 0; | 
| 242 | 948k | } | 
| 243 |  |  | 
// Unlink `e` from whichever LRU free list it is on, and — when timestamp
// based eviction is enabled — drop its entry from the matching sorted
// (timestamp, handle) set so the two structures stay in sync.
// Caller must hold _mutex.
void LRUCache::_lru_remove(LRUHandle* e) {
    e->next->prev = e->prev;
    e->prev->next = e->next;
    e->prev = e->next = nullptr;

    if (_cache_value_check_timestamp) {
        if (e->priority == CachePriority::NORMAL) {
            // The set key is (extracted timestamp, handle); both must match.
            auto pair = std::make_pair(_cache_value_time_extractor(e->value), e);
            auto found_it = _sorted_normal_entries_with_timestamp.find(pair);
            if (found_it != _sorted_normal_entries_with_timestamp.end()) {
                _sorted_normal_entries_with_timestamp.erase(found_it);
            }
        } else if (e->priority == CachePriority::DURABLE) {
            auto pair = std::make_pair(_cache_value_time_extractor(e->value), e);
            auto found_it = _sorted_durable_entries_with_timestamp.find(pair);
            if (found_it != _sorted_durable_entries_with_timestamp.end()) {
                _sorted_durable_entries_with_timestamp.erase(found_it);
            }
        }
    }
}
| 265 |  |  | 
// Append `e` to the circular LRU list headed by sentinel `list`, making it
// the newest entry. Also mirrors the entry into the timestamp-sorted sets
// when timestamp-based eviction is enabled. Caller must hold _mutex.
void LRUCache::_lru_append(LRUHandle* list, LRUHandle* e) {
    // Make "e" newest entry by inserting just before *list
    e->next = list;
    e->prev = list->prev;
    e->prev->next = e;
    e->next->prev = e;

    // _cache_value_check_timestamp is true,
    // means evict entry will depends on the timestamp asc set,
    // the timestamp is updated by higher level caller,
    // and the timestamp of hit entry is different with the insert entry,
    // that is why need check timestamp to evict entry,
    // in order to keep the survival time of hit entries
    // longer than the entries just inserted,
    // so use asc set to sorted these entries's timestamp and LRUHandle*
    if (_cache_value_check_timestamp) {
        if (e->priority == CachePriority::NORMAL) {
            _sorted_normal_entries_with_timestamp.insert(
                    std::make_pair(_cache_value_time_extractor(e->value), e));
        } else if (e->priority == CachePriority::DURABLE) {
            _sorted_durable_entries_with_timestamp.insert(
                    std::make_pair(_cache_value_time_extractor(e->value), e));
        }
    }
}
| 291 |  |  | 
// Look up `key` in this shard. On a hit the handle is pinned: its refcount
// is bumped and, if it was idle (refs == 1, held only by the cache), it is
// detached from the LRU free list so it cannot be evicted while in use.
// A non-null result must be paired with release().
Cache::Handle* LRUCache::lookup(const CacheKey& key, uint32_t hash) {
    std::lock_guard l(_mutex);
    ++_lookup_count;
    LRUHandle* e = _table.lookup(key, hash);
    if (e != nullptr) {
        // we get it from _table, so in_cache must be true
        DCHECK(e->in_cache);
        if (e->refs == 1) {
            // only in LRU free list, remove it from list
            _lru_remove(e);
        }
        e->refs++;
        ++_hit_count;
        e->last_visit_time = UnixMillis();
    } else {
        ++_miss_count;
    }

    // If key not exist in cache, and is lru k cache, and key in visits list,
    // then move the key to beginning of the visits list.
    // key in visits list indicates that the key has been inserted once after the cache is full.
    if (e == nullptr && _is_lru_k) {
        // NOTE: the visits list is keyed by the 32-bit hash, not the full key.
        auto it = _visits_lru_cache_map.find(hash);
        if (it != _visits_lru_cache_map.end()) {
            _visits_lru_cache_list.splice(_visits_lru_cache_list.begin(), _visits_lru_cache_list,
                                          it->second);
        }
    }
    return reinterpret_cast<Cache::Handle*>(e);
}
| 322 |  |  | 
// Drop one caller reference on `handle`. If the entry becomes idle (only
// the cache still references it) it is appended to the appropriate LRU free
// list — or, when the shard is over capacity, evicted on the spot. The last
// owner frees the handle outside the mutex.
void LRUCache::release(Cache::Handle* handle) {
    if (handle == nullptr) {
        return;
    }
    auto* e = reinterpret_cast<LRUHandle*>(handle);
    bool last_ref = false;
    {
        std::lock_guard l(_mutex);
        // if last_ref is true, key may have been evict from the cache,
        // or if it is lru k, first insert of key may have failed.
        last_ref = _unref(e);
        if (e->in_cache && e->refs == 1) {
            // only exists in cache
            if (_usage > _capacity) {
                // take this opportunity and remove the item
                bool removed = _table.remove(e);
                DCHECK(removed);
                e->in_cache = false;
                _unref(e);
                // `entry->in_cache = false` and `_usage -= entry->total_size;` and `_unref(entry)` should appear together.
                // see the comment for old entry in `LRUCache::insert`.
                _usage -= e->total_size;
                last_ref = true;
            } else {
                // put it to LRU free list
                if (e->priority == CachePriority::NORMAL) {
                    _lru_append(&_lru_normal, e);
                } else if (e->priority == CachePriority::DURABLE) {
                    _lru_append(&_lru_durable, e);
                }
            }
        }
    }

    // free handle out of mutex
    if (last_ref) {
        e->free();
    }
}
| 362 |  |  | 
| 363 | 105 | void LRUCache::_evict_from_lru_with_time(size_t total_size, LRUHandle** to_remove_head) { | 
| 364 |  |     // 1. evict normal cache entries | 
| 365 | 124 |     while ((_usage + total_size > _capacity || _check_element_count_limit()) && | 
| 366 | 124 |            !_sorted_normal_entries_with_timestamp.empty()) { | 
| 367 | 19 |         auto entry_pair = _sorted_normal_entries_with_timestamp.begin(); | 
| 368 | 19 |         LRUHandle* remove_handle = entry_pair->second; | 
| 369 | 19 |         DCHECK(remove_handle != nullptr); | 
| 370 | 19 |         DCHECK(remove_handle->priority == CachePriority::NORMAL); | 
| 371 | 19 |         _evict_one_entry(remove_handle); | 
| 372 | 19 |         remove_handle->next = *to_remove_head; | 
| 373 | 19 |         *to_remove_head = remove_handle; | 
| 374 | 19 |     } | 
| 375 |  |  | 
| 376 |  |     // 2. evict durable cache entries if need | 
| 377 | 105 |     while ((_usage + total_size > _capacity || _check_element_count_limit()) && | 
| 378 | 105 |            !_sorted_durable_entries_with_timestamp.empty()) { | 
| 379 | 0 |         auto entry_pair = _sorted_durable_entries_with_timestamp.begin(); | 
| 380 | 0 |         LRUHandle* remove_handle = entry_pair->second; | 
| 381 | 0 |         DCHECK(remove_handle != nullptr); | 
| 382 | 0 |         DCHECK(remove_handle->priority == CachePriority::DURABLE); | 
| 383 | 0 |         _evict_one_entry(remove_handle); | 
| 384 | 0 |         remove_handle->next = *to_remove_head; | 
| 385 | 0 |         *to_remove_head = remove_handle; | 
| 386 | 0 |     } | 
| 387 | 105 | } | 
| 388 |  |  | 
| 389 | 327k | void LRUCache::_evict_from_lru(size_t total_size, LRUHandle** to_remove_head) { | 
| 390 |  |     // 1. evict normal cache entries | 
| 391 | 588k |     while ((_usage + total_size > _capacity || _check_element_count_limit()) && | 
| 392 | 588k |            _lru_normal.next != &_lru_normal) { | 
| 393 | 261k |         LRUHandle* old = _lru_normal.next; | 
| 394 | 261k |         DCHECK(old->priority == CachePriority::NORMAL); | 
| 395 | 261k |         _evict_one_entry(old); | 
| 396 | 261k |         old->next = *to_remove_head; | 
| 397 | 261k |         *to_remove_head = old; | 
| 398 | 261k |     } | 
| 399 |  |     // 2. evict durable cache entries if need | 
| 400 | 327k |     while ((_usage + total_size > _capacity || _check_element_count_limit()) && | 
| 401 | 327k |            _lru_durable.next != &_lru_durable) { | 
| 402 | 4 |         LRUHandle* old = _lru_durable.next; | 
| 403 | 4 |         DCHECK(old->priority == CachePriority::DURABLE); | 
| 404 | 4 |         _evict_one_entry(old); | 
| 405 | 4 |         old->next = *to_remove_head; | 
| 406 | 4 |         *to_remove_head = old; | 
| 407 | 4 |     } | 
| 408 | 327k | } | 
| 409 |  |  | 
// Detach one idle entry (refs == 1, i.e. held only by the cache) from the
// LRU list and the hash table, dropping the cache's reference and its usage
// charge. The caller becomes responsible for freeing `e`.
// Caller must hold _mutex.
void LRUCache::_evict_one_entry(LRUHandle* e) {
    DCHECK(e->in_cache);
    DCHECK(e->refs == 1); // LRU list contains elements which may be evicted
    _lru_remove(e);
    bool removed = _table.remove(e);
    DCHECK(removed);
    e->in_cache = false;
    _unref(e);
    // `entry->in_cache = false` and `_usage -= entry->total_size;` and `_unref(entry)` should appear together.
    // see the comment for old entry in `LRUCache::insert`.
    _usage -= e->total_size;
}
| 422 |  |  | 
| 423 | 581k | bool LRUCache::_check_element_count_limit() { | 
| 424 | 581k |     return _element_count_capacity != 0 && _table.element_count() >= _element_count_capacity; | 
| 425 | 581k | } | 
| 426 |  |  | 
// After cache is full,
// 1.Return false. If key has been inserted into the visits list before,
// key is allowed to be inserted into cache this time (this will trigger cache evict),
// and key is removed from the visits list.
// 2. Return true. If key not in visits list, insert it into visits list.
bool LRUCache::_lru_k_insert_visits_list(size_t total_size, visits_lru_cache_key visits_key) {
    // While the cache has free room, admission is unconditional (fall through
    // to `return false` below).
    if (_usage + total_size > _capacity ||
        _check_element_count_limit()) { // this line no lock required
        auto it = _visits_lru_cache_map.find(visits_key);
        if (it != _visits_lru_cache_map.end()) {
            // Second sighting of this key: admit it into the real cache and
            // drop its bookkeeping entry (and charge) from the visits list.
            _visits_lru_cache_usage -= it->second->second;
            _visits_lru_cache_list.erase(it->second);
            _visits_lru_cache_map.erase(it);
        } else {
            // _visits_lru_cache_list capacity is same as the cache itself.
            // If _visits_lru_cache_list is full, some keys will also be evict.
            while (_visits_lru_cache_usage + total_size > _capacity &&
                   _visits_lru_cache_usage != 0) {
                DCHECK(!_visits_lru_cache_map.empty());
                _visits_lru_cache_usage -= _visits_lru_cache_list.back().second;
                _visits_lru_cache_map.erase(_visits_lru_cache_list.back().first);
                _visits_lru_cache_list.pop_back();
            }
            // 1. If true, insert key at the beginning of _visits_lru_cache_list.
            // 2. If false, it means total_size > cache _capacity, preventing this insert.
            if (_visits_lru_cache_usage + total_size <= _capacity) {
                _visits_lru_cache_list.emplace_front(visits_key, total_size);
                _visits_lru_cache_map[visits_key] = _visits_lru_cache_list.begin();
                _visits_lru_cache_usage += total_size;
            }
            return true;
        }
    }
    return false;
}
| 462 |  |  | 
// Create a pinned handle for (key, value, charge) and insert it into this
// shard, evicting idle entries first if needed. The returned handle carries
// one caller reference and must be paired with release(). If an entry with
// the same key already exists, it is displaced (a "stampede"): it leaves the
// table and the usage accounting immediately, but is freed only after all
// of its outstanding handles are released.
Cache::Handle* LRUCache::insert(const CacheKey& key, uint32_t hash, void* value, size_t charge,
                                CachePriority priority) {
    // The key bytes are stored inline at the tail of the handle (key_data[1]
    // flexible-array idiom), hence the "- 1 + key.size()" sizing.
    size_t handle_size = sizeof(LRUHandle) - 1 + key.size();
    // NOTE(review): malloc's result is not checked; on OOM the writes below
    // would dereference nullptr — confirm whether callers rely on crash-on-OOM.
    auto* e = reinterpret_cast<LRUHandle*>(malloc(handle_size));
    e->value = value;
    e->charge = charge;
    e->key_length = key.size();
    // if LRUCacheType::NUMBER, charge not add handle_size,
    // because charge at this time is no longer the memory size, but an weight.
    e->total_size = (_type == LRUCacheType::SIZE ? handle_size + charge : charge);
    e->hash = hash;
    e->refs = 1; // only one for the returned handle.
    e->next = e->prev = nullptr;
    e->in_cache = false;
    e->priority = priority;
    e->type = _type;
    memcpy(e->key_data, key.data(), key.size());
    e->last_visit_time = UnixMillis();

    LRUHandle* to_remove_head = nullptr;
    {
        std::lock_guard l(_mutex);

        // LRU-K admission: on the first sighting of a key while full, the
        // handle is returned un-inserted (in_cache stays false); release()
        // will then free it once the caller drops its reference.
        if (_is_lru_k && _lru_k_insert_visits_list(e->total_size, hash)) {
            return reinterpret_cast<Cache::Handle*>(e);
        }

        // Free the space following strict LRU policy until enough space
        // is freed or the lru list is empty
        if (_cache_value_check_timestamp) {
            _evict_from_lru_with_time(e->total_size, &to_remove_head);
        } else {
            _evict_from_lru(e->total_size, &to_remove_head);
        }

        // insert into the cache
        // note that the cache might get larger than its capacity if not enough
        // space was freed
        auto* old = _table.insert(e);
        e->in_cache = true;
        _usage += e->total_size;
        e->refs++; // one for the returned handle, one for LRUCache.
        if (old != nullptr) {
            _stampede_count++;
            old->in_cache = false;
            // `entry->in_cache = false` and `_usage -= entry->total_size;` and `_unref(entry)` should appear together.
            // Whether the reference of the old entry is 0, the cache usage is subtracted here,
            // because the old entry has been removed from the cache and should not be counted in the cache capacity,
            // but the memory of the old entry is still tracked by the cache memory_tracker.
            // After all the old handles are released, the old entry will be freed and the memory of the old entry
            // will be released from the cache memory_tracker.
            _usage -= old->total_size;
            // if false, old entry is being used externally, just ref-- and sub _usage,
            if (_unref(old)) {
                // old is on LRU because it's in cache and its reference count
                // was just 1 (Unref returned 0)
                _lru_remove(old);
                old->next = to_remove_head;
                to_remove_head = old;
            }
        }
    }

    // we free the entries here outside of mutex for
    // performance reasons
    while (to_remove_head != nullptr) {
        LRUHandle* next = to_remove_head->next;
        to_remove_head->free();
        to_remove_head = next;
    }

    return reinterpret_cast<Cache::Handle*>(e);
}
| 536 |  |  | 
// Remove `key` from this shard if present. The entry is freed immediately
// only when no caller still holds a handle to it; otherwise the final
// release() performs the free.
void LRUCache::erase(const CacheKey& key, uint32_t hash) {
    LRUHandle* e = nullptr;
    bool last_ref = false;
    {
        std::lock_guard l(_mutex);
        e = _table.remove(key, hash);
        if (e != nullptr) {
            last_ref = _unref(e);
            // if last_ref is false or in_cache is false, e must not be in lru
            if (last_ref && e->in_cache) {
                // locate in free list
                _lru_remove(e);
            }
            e->in_cache = false;
            // `entry->in_cache = false` and `_usage -= entry->total_size;` and `_unref(entry)` should appear together.
            // see the comment for old entry in `LRUCache::insert`.
            _usage -= e->total_size;
        }
    }
    // free handle out of mutex, when last_ref is true, e must not be nullptr
    if (last_ref) {
        e->free();
    }
}
| 561 |  |  | 
| 562 | 11.8k | PrunedInfo LRUCache::prune() { | 
| 563 | 11.8k |     LRUHandle* to_remove_head = nullptr; | 
| 564 | 11.8k |     { | 
| 565 | 11.8k |         std::lock_guard l(_mutex); | 
| 566 | 22.7k |         while (_lru_normal.next != &_lru_normal) { | 
| 567 | 10.9k |             LRUHandle* old = _lru_normal.next; | 
| 568 | 10.9k |             _evict_one_entry(old); | 
| 569 | 10.9k |             old->next = to_remove_head; | 
| 570 | 10.9k |             to_remove_head = old; | 
| 571 | 10.9k |         } | 
| 572 | 11.8k |         while (_lru_durable.next != &_lru_durable) { | 
| 573 | 6 |             LRUHandle* old = _lru_durable.next; | 
| 574 | 6 |             _evict_one_entry(old); | 
| 575 | 6 |             old->next = to_remove_head; | 
| 576 | 6 |             to_remove_head = old; | 
| 577 | 6 |         } | 
| 578 | 11.8k |     } | 
| 579 | 11.8k |     int64_t pruned_count = 0; | 
| 580 | 11.8k |     int64_t pruned_size = 0; | 
| 581 | 22.8k |     while (to_remove_head != nullptr) { | 
| 582 | 10.9k |         ++pruned_count; | 
| 583 | 10.9k |         pruned_size += to_remove_head->total_size; | 
| 584 | 10.9k |         LRUHandle* next = to_remove_head->next; | 
| 585 | 10.9k |         to_remove_head->free(); | 
| 586 | 10.9k |         to_remove_head = next; | 
| 587 | 10.9k |     } | 
| 588 | 11.8k |     return {pruned_count, pruned_size}; | 
| 589 | 11.8k | } | 
| 590 |  |  | 
// Evict idle entries for which `pred` returns true, scanning each free list
// from oldest to newest. In lazy_mode, scanning a list stops at the first
// non-matching entry, so only the coldest prefix of each list is examined.
// Returns the count and total size of freed entries.
PrunedInfo LRUCache::prune_if(CachePrunePredicate pred, bool lazy_mode) {
    LRUHandle* to_remove_head = nullptr;
    {
        std::lock_guard l(_mutex);
        LRUHandle* p = _lru_normal.next;
        while (p != &_lru_normal) {
            // Capture `next` before eviction rewires p->next onto the
            // to_remove chain.
            LRUHandle* next = p->next;
            if (pred(p)) {
                _evict_one_entry(p);
                p->next = to_remove_head;
                to_remove_head = p;
            } else if (lazy_mode) {
                break;
            }
            p = next;
        }

        p = _lru_durable.next;
        while (p != &_lru_durable) {
            LRUHandle* next = p->next;
            if (pred(p)) {
                _evict_one_entry(p);
                p->next = to_remove_head;
                to_remove_head = p;
            } else if (lazy_mode) {
                break;
            }
            p = next;
        }
    }
    // Free the matched entries outside the mutex.
    int64_t pruned_count = 0;
    int64_t pruned_size = 0;
    while (to_remove_head != nullptr) {
        ++pruned_count;
        pruned_size += to_remove_head->total_size;
        LRUHandle* next = to_remove_head->next;
        to_remove_head->free();
        to_remove_head = next;
    }
    return {pruned_count, pruned_size};
}
| 632 |  |  | 
| 633 | 0 | void LRUCache::for_each_entry(const std::function<void(const LRUHandle*)>& visitor) { | 
| 634 | 0 |     std::lock_guard l(_mutex); | 
| 635 | 0 |     for (LRUHandle* p = _lru_normal.next; p != &_lru_normal; p = p->next) { | 
| 636 | 0 |         visitor(p); | 
| 637 | 0 |     } | 
| 638 | 0 |     for (LRUHandle* p = _lru_durable.next; p != &_lru_durable; p = p->next) { | 
| 639 | 0 |         visitor(p); | 
| 640 | 0 |     } | 
| 641 | 0 | } | 
| 642 |  |  | 
// Installs the callback that extracts a timestamp from a cached value.
// Only meaningful together with set_cache_value_check_timestamp(true).
// NOTE(review): the callable is copied here; not guarded by _mutex, so this
// is presumably called only during cache setup — confirm with callers.
void LRUCache::set_cache_value_time_extractor(CacheValueTimeExtractor cache_value_time_extractor) {
    _cache_value_time_extractor = cache_value_time_extractor;
}
| 646 |  |  | 
// Enables/disables timestamp checking of cached values (the timestamp is
// obtained via the extractor set by set_cache_value_time_extractor).
// NOTE(review): not guarded by _mutex; presumably only called at setup time.
void LRUCache::set_cache_value_check_timestamp(bool cache_value_check_timestamp) {
    _cache_value_check_timestamp = cache_value_check_timestamp;
}
| 650 |  |  | 
| 651 | 680k | inline uint32_t ShardedLRUCache::_hash_slice(const CacheKey& s) { | 
| 652 | 680k |     return s.hash(s.data(), s.size(), 0); | 
| 653 | 680k | } | 
| 654 |  |  | 
| 655 |  | ShardedLRUCache::ShardedLRUCache(const std::string& name, size_t capacity, LRUCacheType type, | 
| 656 |  |                                  uint32_t num_shards, uint32_t total_element_count_capacity, | 
| 657 |  |                                  bool is_lru_k) | 
| 658 | 864 |         : _name(name), | 
| 659 | 864 |           _num_shard_bits(__builtin_ctz(num_shards)), | 
| 660 | 864 |           _num_shards(num_shards), | 
| 661 | 864 |           _last_id(1), | 
| 662 | 864 |           _capacity(capacity) { | 
| 663 | 864 |     CHECK(num_shards > 0) << "num_shards cannot be 0"; | 
| 664 | 864 |     CHECK_EQ((num_shards & (num_shards - 1)), 0) | 
| 665 | 0 |             << "num_shards should be power of two, but got " << num_shards; | 
| 666 |  |  | 
| 667 | 864 |     const size_t per_shard = (capacity + (_num_shards - 1)) / _num_shards; | 
| 668 | 864 |     const uint32_t per_shard_element_count_capacity = | 
| 669 | 864 |             (total_element_count_capacity + (_num_shards - 1)) / _num_shards; | 
| 670 | 864 |     auto** shards = new (std::nothrow) LRUCache*[_num_shards]; | 
| 671 | 13.1k |     for (int s = 0; s < _num_shards; s++) { | 
| 672 | 12.2k |         shards[s] = new LRUCache(type, is_lru_k); | 
| 673 | 12.2k |         shards[s]->set_capacity(per_shard); | 
| 674 | 12.2k |         shards[s]->set_element_count_capacity(per_shard_element_count_capacity); | 
| 675 | 12.2k |     } | 
| 676 | 864 |     _shards = shards; | 
| 677 |  |  | 
| 678 | 864 |     _entity = DorisMetrics::instance()->metric_registry()->register_entity( | 
| 679 | 864 |             std::string("lru_cache:") + name, {{"name", name}}); | 
| 680 | 864 |     _entity->register_hook(name, std::bind(&ShardedLRUCache::update_cache_metrics, this)); | 
| 681 | 864 |     INT_GAUGE_METRIC_REGISTER(_entity, cache_capacity); | 
| 682 | 864 |     INT_GAUGE_METRIC_REGISTER(_entity, cache_usage); | 
| 683 | 864 |     INT_GAUGE_METRIC_REGISTER(_entity, cache_element_count); | 
| 684 | 864 |     DOUBLE_GAUGE_METRIC_REGISTER(_entity, cache_usage_ratio); | 
| 685 | 864 |     INT_COUNTER_METRIC_REGISTER(_entity, cache_lookup_count); | 
| 686 | 864 |     INT_COUNTER_METRIC_REGISTER(_entity, cache_hit_count); | 
| 687 | 864 |     INT_COUNTER_METRIC_REGISTER(_entity, cache_stampede_count); | 
| 688 | 864 |     INT_COUNTER_METRIC_REGISTER(_entity, cache_miss_count); | 
| 689 | 864 |     DOUBLE_GAUGE_METRIC_REGISTER(_entity, cache_hit_ratio); | 
| 690 |  |  | 
| 691 | 864 |     _hit_count_bvar.reset(new bvar::Adder<uint64_t>("doris_cache", _name)); | 
| 692 | 864 |     _hit_count_per_second.reset(new bvar::PerSecond<bvar::Adder<uint64_t>>( | 
| 693 | 864 |             "doris_cache", _name + "_persecond", _hit_count_bvar.get(), 60)); | 
| 694 | 864 |     _lookup_count_bvar.reset(new bvar::Adder<uint64_t>("doris_cache", _name)); | 
| 695 | 864 |     _lookup_count_per_second.reset(new bvar::PerSecond<bvar::Adder<uint64_t>>( | 
| 696 | 864 |             "doris_cache", _name + "_persecond", _lookup_count_bvar.get(), 60)); | 
| 697 | 864 | } | 
| 698 |  |  | 
| 699 |  | ShardedLRUCache::ShardedLRUCache(const std::string& name, size_t capacity, LRUCacheType type, | 
| 700 |  |                                  uint32_t num_shards, | 
| 701 |  |                                  CacheValueTimeExtractor cache_value_time_extractor, | 
| 702 |  |                                  bool cache_value_check_timestamp, | 
| 703 |  |                                  uint32_t total_element_count_capacity, bool is_lru_k) | 
| 704 | 114 |         : ShardedLRUCache(name, capacity, type, num_shards, total_element_count_capacity, | 
| 705 | 114 |                           is_lru_k) { | 
| 706 | 228 |     for (int s = 0; s < _num_shards; s++) { | 
| 707 | 114 |         _shards[s]->set_cache_value_time_extractor(cache_value_time_extractor); | 
| 708 | 114 |         _shards[s]->set_cache_value_check_timestamp(cache_value_check_timestamp); | 
| 709 | 114 |     } | 
| 710 | 114 | } | 
| 711 |  |  | 
| 712 | 857 | ShardedLRUCache::~ShardedLRUCache() { | 
| 713 | 857 |     _entity->deregister_hook(_name); | 
| 714 | 857 |     DorisMetrics::instance()->metric_registry()->deregister_entity(_entity); | 
| 715 | 857 |     if (_shards) { | 
| 716 | 12.7k |         for (int s = 0; s < _num_shards; s++) { | 
| 717 | 11.8k |             delete _shards[s]; | 
| 718 | 11.8k |         } | 
| 719 | 857 |         delete[] _shards; | 
| 720 | 857 |     } | 
| 721 | 857 | } | 
| 722 |  |  | 
| 723 | 14 | PrunedInfo ShardedLRUCache::set_capacity(size_t capacity) { | 
| 724 | 14 |     std::lock_guard l(_mutex); | 
| 725 | 14 |     PrunedInfo pruned_info; | 
| 726 | 14 |     const size_t per_shard = (capacity + (_num_shards - 1)) / _num_shards; | 
| 727 | 28 |     for (int s = 0; s < _num_shards; s++) { | 
| 728 | 14 |         PrunedInfo info = _shards[s]->set_capacity(per_shard); | 
| 729 | 14 |         pruned_info.pruned_count += info.pruned_count; | 
| 730 | 14 |         pruned_info.pruned_size += info.pruned_size; | 
| 731 | 14 |     } | 
| 732 | 14 |     _capacity = capacity; | 
| 733 | 14 |     return pruned_info; | 
| 734 | 14 | } | 
| 735 |  |  | 
// Returns the total configured capacity across all shards.
// _mutex guards against a concurrent set_capacity().
size_t ShardedLRUCache::get_capacity() {
    std::lock_guard l(_mutex);
    return _capacity;
}
| 740 |  |  | 
| 741 |  | Cache::Handle* ShardedLRUCache::insert(const CacheKey& key, void* value, size_t charge, | 
| 742 | 327k |                                        CachePriority priority) { | 
| 743 | 327k |     const uint32_t hash = _hash_slice(key); | 
| 744 | 327k |     return _shards[_shard(hash)]->insert(key, hash, value, charge, priority); | 
| 745 | 327k | } | 
| 746 |  |  | 
| 747 | 352k | Cache::Handle* ShardedLRUCache::lookup(const CacheKey& key) { | 
| 748 | 352k |     const uint32_t hash = _hash_slice(key); | 
| 749 | 352k |     return _shards[_shard(hash)]->lookup(key, hash); | 
| 750 | 352k | } | 
| 751 |  |  | 
| 752 | 631k | void ShardedLRUCache::release(Handle* handle) { | 
| 753 | 631k |     auto* h = reinterpret_cast<LRUHandle*>(handle); | 
| 754 | 631k |     _shards[_shard(h->hash)]->release(handle); | 
| 755 | 631k | } | 
| 756 |  |  | 
| 757 | 15 | void ShardedLRUCache::erase(const CacheKey& key) { | 
| 758 | 15 |     const uint32_t hash = _hash_slice(key); | 
| 759 | 15 |     _shards[_shard(hash)]->erase(key, hash); | 
| 760 | 15 | } | 
| 761 |  |  | 
| 762 | 313k | void* ShardedLRUCache::value(Handle* handle) { | 
| 763 | 313k |     return reinterpret_cast<LRUHandle*>(handle)->value; | 
| 764 | 313k | } | 
| 765 |  |  | 
// Hands out a process-unique id (monotonically increasing from 1).
// Relaxed ordering suffices: only atomicity of the increment matters;
// the counter synchronizes no other memory.
uint64_t ShardedLRUCache::new_id() {
    return _last_id.fetch_add(1, std::memory_order_relaxed);
}
| 769 |  |  | 
| 770 | 0 | PrunedInfo ShardedLRUCache::prune() { | 
| 771 | 0 |     PrunedInfo pruned_info; | 
| 772 | 0 |     for (int s = 0; s < _num_shards; s++) { | 
| 773 | 0 |         PrunedInfo info = _shards[s]->prune(); | 
| 774 | 0 |         pruned_info.pruned_count += info.pruned_count; | 
| 775 | 0 |         pruned_info.pruned_size += info.pruned_size; | 
| 776 | 0 |     } | 
| 777 | 0 |     return pruned_info; | 
| 778 | 0 | } | 
| 779 |  |  | 
| 780 | 0 | PrunedInfo ShardedLRUCache::prune_if(CachePrunePredicate pred, bool lazy_mode) { | 
| 781 | 0 |     PrunedInfo pruned_info; | 
| 782 | 0 |     for (int s = 0; s < _num_shards; s++) { | 
| 783 | 0 |         PrunedInfo info = _shards[s]->prune_if(pred, lazy_mode); | 
| 784 | 0 |         pruned_info.pruned_count += info.pruned_count; | 
| 785 | 0 |         pruned_info.pruned_size += info.pruned_size; | 
| 786 | 0 |     } | 
| 787 | 0 |     return pruned_info; | 
| 788 | 0 | } | 
| 789 |  |  | 
| 790 | 0 | void ShardedLRUCache::for_each_entry(const std::function<void(const LRUHandle*)>& visitor) { | 
| 791 | 0 |     for (int s = 0; s < _num_shards; s++) { | 
| 792 | 0 |         _shards[s]->for_each_entry(visitor); | 
| 793 | 0 |     } | 
| 794 | 0 | } | 
| 795 |  |  | 
| 796 | 16.0k | int64_t ShardedLRUCache::get_usage() { | 
| 797 | 16.0k |     size_t total_usage = 0; | 
| 798 | 32.1k |     for (int i = 0; i < _num_shards; i++) { | 
| 799 | 16.0k |         total_usage += _shards[i]->get_usage(); | 
| 800 | 16.0k |     } | 
| 801 | 16.0k |     return total_usage; | 
| 802 | 16.0k | } | 
| 803 |  |  | 
| 804 | 10 | size_t ShardedLRUCache::get_element_count() { | 
| 805 | 10 |     size_t total_element_count = 0; | 
| 806 | 202 |     for (int i = 0; i < _num_shards; i++) { | 
| 807 | 192 |         total_element_count += _shards[i]->get_element_count(); | 
| 808 | 192 |     } | 
| 809 | 10 |     return total_element_count; | 
| 810 | 10 | } | 
| 811 |  |  | 
| 812 | 0 | void ShardedLRUCache::update_cache_metrics() const { | 
| 813 | 0 |     size_t capacity = 0; | 
| 814 | 0 |     size_t total_usage = 0; | 
| 815 | 0 |     size_t total_lookup_count = 0; | 
| 816 | 0 |     size_t total_hit_count = 0; | 
| 817 | 0 |     size_t total_element_count = 0; | 
| 818 | 0 |     size_t total_miss_count = 0; | 
| 819 | 0 |     size_t total_stampede_count = 0; | 
| 820 |  | 
 | 
| 821 | 0 |     for (int i = 0; i < _num_shards; i++) { | 
| 822 | 0 |         capacity += _shards[i]->get_capacity(); | 
| 823 | 0 |         total_usage += _shards[i]->get_usage(); | 
| 824 | 0 |         total_lookup_count += _shards[i]->get_lookup_count(); | 
| 825 | 0 |         total_hit_count += _shards[i]->get_hit_count(); | 
| 826 | 0 |         total_element_count += _shards[i]->get_element_count(); | 
| 827 | 0 |         total_miss_count += _shards[i]->get_miss_count(); | 
| 828 | 0 |         total_stampede_count += _shards[i]->get_stampede_count(); | 
| 829 | 0 |     } | 
| 830 |  | 
 | 
| 831 | 0 |     cache_capacity->set_value(capacity); | 
| 832 | 0 |     cache_usage->set_value(total_usage); | 
| 833 | 0 |     cache_element_count->set_value(total_element_count); | 
| 834 | 0 |     cache_lookup_count->set_value(total_lookup_count); | 
| 835 | 0 |     cache_hit_count->set_value(total_hit_count); | 
| 836 | 0 |     cache_miss_count->set_value(total_miss_count); | 
| 837 | 0 |     cache_stampede_count->set_value(total_stampede_count); | 
| 838 | 0 |     cache_usage_ratio->set_value( | 
| 839 | 0 |             capacity == 0 ? 0 : (static_cast<double>(total_usage) / static_cast<double>(capacity))); | 
| 840 | 0 |     cache_hit_ratio->set_value(total_lookup_count == 0 ? 0 | 
| 841 | 0 |                                                        : (static_cast<double>(total_hit_count) / | 
| 842 | 0 |                                                           static_cast<double>(total_lookup_count))); | 
| 843 | 0 | } | 
| 844 |  |  | 
| 845 |  | Cache::Handle* DummyLRUCache::insert(const CacheKey& key, void* value, size_t charge, | 
| 846 | 181 |                                      CachePriority priority) { | 
| 847 | 181 |     size_t handle_size = sizeof(LRUHandle); | 
| 848 | 181 |     auto* e = reinterpret_cast<LRUHandle*>(malloc(handle_size)); | 
| 849 | 181 |     e->value = value; | 
| 850 | 181 |     e->charge = charge; | 
| 851 | 181 |     e->key_length = 0; | 
| 852 | 181 |     e->total_size = 0; | 
| 853 | 181 |     e->hash = 0; | 
| 854 | 181 |     e->refs = 1; // only one for the returned handle | 
| 855 | 181 |     e->next = e->prev = nullptr; | 
| 856 | 181 |     e->in_cache = false; | 
| 857 | 181 |     return reinterpret_cast<Cache::Handle*>(e); | 
| 858 | 181 | } | 
| 859 |  |  | 
| 860 | 181 | void DummyLRUCache::release(Cache::Handle* handle) { | 
| 861 | 181 |     if (handle == nullptr) { | 
| 862 | 0 |         return; | 
| 863 | 0 |     } | 
| 864 | 181 |     auto* e = reinterpret_cast<LRUHandle*>(handle); | 
| 865 | 181 |     e->free(); | 
| 866 | 181 | } | 
| 867 |  |  | 
| 868 | 0 | void* DummyLRUCache::value(Handle* handle) { | 
| 869 | 0 |     return reinterpret_cast<LRUHandle*>(handle)->value; | 
| 870 | 0 | } | 
| 871 |  |  | 
| 872 |  | } // namespace doris |