Coverage Report

Created: 2026-04-17 23:17

be/src/util/tdigest.h

  Line |  Count | Source
-------+--------+---------------------------------------------------------------------
     1 |        | // Licensed to the Apache Software Foundation (ASF) under one
     2 |        | // or more contributor license agreements.  See the NOTICE file
     3 |        | // distributed with this work for additional information
     4 |        | // regarding copyright ownership.  The ASF licenses this file
     5 |        | // to you under the Apache License, Version 2.0 (the
     6 |        | // "License"); you may not use this file except in compliance
     7 |        | // with the License.  You may obtain a copy of the License at
     8 |        | //
     9 |        | //   http://www.apache.org/licenses/LICENSE-2.0
    10 |        | //
    11 |        | // Unless required by applicable law or agreed to in writing,
    12 |        | // software distributed under the License is distributed on an
    13 |        | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    14 |        | // KIND, either express or implied.  See the License for the
    15 |        | // specific language governing permissions and limitations
    16 |        | // under the License.
    17 |        |
    18 |        | /*
    19 |        |  * Licensed to Derrick R. Burns under one or more
    20 |        |  * contributor license agreements.  See the NOTICES file distributed with
    21 |        |  * this work for additional information regarding copyright ownership.
    22 |        |  * The ASF licenses this file to You under the Apache License, Version 2.0
    23 |        |  * (the "License"); you may not use this file except in compliance with
    24 |        |  * the License.  You may obtain a copy of the License at
    25 |        |  *
    26 |        |  *     http://www.apache.org/licenses/LICENSE-2.0
    27 |        |  *
    28 |        |  * Unless required by applicable law or agreed to in writing, software
    29 |        |  * distributed under the License is distributed on an "AS IS" BASIS,
    30 |        |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    31 |        |  * See the License for the specific language governing permissions and
    32 |        |  * limitations under the License.
    33 |        |  */
    34 |        |
    35 |        | // T-Digest :  Percentile and Quantile Estimation of Big Data
    36 |        | // A new data structure for accurate on-line accumulation of rank-based statistics
    37 |        | // such as quantiles and trimmed means.
    38 |        | // See original paper: "Computing extremely accurate quantiles using t-digest"
    39 |        | // by Ted Dunning and Otmar Ertl for more details
    40 |        | // https://github.com/tdunning/t-digest/blob/07b8f2ca2be8d0a9f04df2feadad5ddc1bb73c88/docs/t-digest-paper/histo.pdf.
    41 |        | // https://github.com/derrickburns/tdigest
    42 |        |
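For orientation before the listing continues: a minimal, hypothetical driver for the public API covered below. The include path and a Doris BE build environment (which provides common/factory_creator.h and common/logging.h) are assumptions, not part of this report.

    #include "util/tdigest.h"  // path assumed; this is the file being reported on

    #include <iostream>

    int main() {
        doris::TDigest digest(200);  // compression factor; see the constructors below
        for (int i = 1; i <= 10000; ++i) {
            digest.add(static_cast<doris::Value>(i));  // buffers into _unprocessed
        }
        digest.compress();  // forces process(): sort, merge, and compress centroids
        std::cout << "p50 ~ " << digest.quantile(0.5f) << "\n"
                  << "p99 ~ " << digest.quantile(0.99f) << "\n"
                  << "cdf(2500) ~ " << digest.cdf(2500.0f) << std::endl;
        return 0;
    }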
    43 |        | #pragma once
    44 |        |
    45 |        | #include <pdqsort.h>
    46 |        |
    47 |        | #include <algorithm>
    48 |        | #include <cfloat>
    49 |        | #include <cmath>
    50 |        | #include <iostream>
    51 |        | #include <memory>
    52 |        | #include <queue>
    53 |        | #include <utility>
    54 |        | #include <vector>
    55 |        |
    56 |        | #include "common/factory_creator.h"
    57 |        | #include "common/logging.h"
    58 |        |
    59 |        | namespace doris {
    60 |        |
    61 |        | using Value = float;
    62 |        | using Weight = float;
    63 |        | using Index = size_t;
    64 |        |
    65 |        | const size_t kHighWater = 40000;
    66 |        |
    67 |        | class Centroid {
    68 |        | public:
    69 |  2.04k |     Centroid() : Centroid(0.0, 0.0) {}
    70 |        |
    71 |  18.2k |     Centroid(Value mean, Weight weight) : _mean(mean), _weight(weight) {}
    72 |        |
    73 |   317k |     Value mean() const noexcept { return _mean; }
    74 |        |
    75 |  17.1k |     Weight weight() const noexcept { return _weight; }
    76 |        |
    77 |  2.39k |     Value& mean() noexcept { return _mean; }
    78 |        |
    79 |  1.20k |     Weight& weight() noexcept { return _weight; }
    80 |        |
    81 |  9.79k |     void add(const Centroid& c) {
    82 |  9.79k |         DCHECK_GT(c._weight, 0);
    83 |  9.79k |         if (_weight != 0.0) {
    84 |  9.79k |             _weight += c._weight;
    85 |  9.79k |             _mean += c._weight * (c._mean - _mean) / _weight;
    86 |  9.79k |         } else {
    87 |      0 |             _weight = c._weight;
    88 |      0 |             _mean = c._mean;
    89 |      0 |         }
    90 |  9.79k |     }
    91 |        |
    92 |        | private:
    93 |        |     Value _mean = 0;
    94 |        |     Weight _weight = 0;
    95 |        | };
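Centroid::add (lines 81-90) folds another centroid in by updating the weighted mean incrementally: with combined weight w = w1 + w2, the new mean is m1 + w2 * (m2 - m1) / w, which is algebraically equal to (w1*m1 + w2*m2) / w but avoids keeping a separate running sum. A self-contained sketch of the same rule (an illustrative stand-in, not the class above):

    #include <cassert>
    #include <cmath>

    // Minimal stand-in mirroring Centroid's merge rule.
    struct MiniCentroid {
        float mean;
        float weight;
    };

    void fold(MiniCentroid& a, const MiniCentroid& b) {
        a.weight += b.weight;
        a.mean += b.weight * (b.mean - a.mean) / a.weight;  // incremental weighted mean
    }

    int main() {
        MiniCentroid a {1.0f, 1.0f};
        MiniCentroid b {3.0f, 3.0f};
        fold(a, b);
        // (1*1 + 3*3) / (1 + 3) = 2.5
        assert(std::fabs(a.mean - 2.5f) < 1e-6f);
        assert(a.weight == 4.0f);
        return 0;
    }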
    96 |        |
    97 |        | struct CentroidList {
    98 |      1 |     CentroidList(const std::vector<Centroid>& s) : iter(s.cbegin()), end(s.cend()) {}
    99 |        |     std::vector<Centroid>::const_iterator iter;
   100 |        |     std::vector<Centroid>::const_iterator end;
   101 |        |
   102 |    100 |     bool advance() { return ++iter != end; }
   103 |        | };
   104 |        |
   105 |        | class CentroidListComparator {
   106 |        | public:
   107 |        |     CentroidListComparator() = default;
   108 |        |
   109 |      0 |     bool operator()(const CentroidList& left, const CentroidList& right) const {
   110 |      0 |         return left.iter->mean() > right.iter->mean();
   111 |      0 |     }
   112 |        | };
   113 |        |
   114 |        | using CentroidListQueue =
   115 |        |         std::priority_queue<CentroidList, std::vector<CentroidList>, CentroidListComparator>;
   116 |        |
   117 |        | struct CentroidComparator {
   118 |   158k |     bool operator()(const Centroid& a, const Centroid& b) const { return a.mean() < b.mean(); }
   119 |        | };
   120 |        |
   121 |        | class TDigest {
   122 |        |     ENABLE_FACTORY_CREATOR(TDigest);
   123 |        |
   124 |        |     class TDigestComparator {
   125 |        |     public:
   126 |        |         TDigestComparator() = default;
   127 |        |
   128 |      0 |         bool operator()(const TDigest* left, const TDigest* right) const {
   129 |      0 |             return left->totalSize() > right->totalSize();
   130 |      0 |         }
   131 |        |     };
   132 |        |     using TDigestQueue =
   133 |        |             std::priority_queue<const TDigest*, std::vector<const TDigest*>, TDigestComparator>;
   134 |        |
   135 |        | public:
   136 |      0 |     TDigest() : TDigest(10000) {}
   137 |        |
   138 |     14 |     explicit TDigest(Value compression) : TDigest(compression, 0) {}
   139 |        |
   140 |     14 |     TDigest(Value compression, Index bufferSize) : TDigest(compression, bufferSize, 0) {}
   141 |        |
   142 |        |     TDigest(Value compression, Index unmergedSize, Index mergedSize)
   143 |     14 |             : _compression(compression),
   144 |     14 |               _max_processed(processedSize(mergedSize, compression)),
   145 |     14 |               _max_unprocessed(unprocessedSize(unmergedSize, compression)) {
   146 |     14 |         _processed.reserve(_max_processed);
   147 |     14 |         _unprocessed.reserve(_max_unprocessed + 1);
   148 |     14 |     }
   149 |        |
   150 |        |     TDigest(std::vector<Centroid>&& processed, std::vector<Centroid>&& unprocessed,
   151 |        |             Value compression, Index unmergedSize, Index mergedSize)
   152 |      0 |             : TDigest(compression, unmergedSize, mergedSize) {
   153 |      0 |         _processed = std::move(processed);
   154 |      0 |         _unprocessed = std::move(unprocessed);
   155 |      0 |
   156 |      0 |         _processed_weight = weight(_processed);
   157 |      0 |         _unprocessed_weight = weight(_unprocessed);
   158 |      0 |         if (_processed.size() > 0) {
   159 |      0 |             _min = std::min(_min, _processed[0].mean());
   160 |      0 |             _max = std::max(_max, (_processed.cend() - 1)->mean());
   161 |      0 |         }
   162 |      0 |         updateCumulative();
   163 |      0 |     }
   164 |        |
   165 |      0 |     static Weight weight(std::vector<Centroid>& centroids) noexcept {
   166 |      0 |         Weight w = 0.0;
   167 |      0 |         for (auto centroid : centroids) {
   168 |      0 |             w += centroid.weight();
   169 |      0 |         }
   170 |      0 |         return w;
   171 |      0 |     }
   172 |        |
   173 |      0 |     TDigest& operator=(TDigest&& o) {
   174 |      0 |         _compression = o._compression;
   175 |      0 |         _max_processed = o._max_processed;
   176 |      0 |         _max_unprocessed = o._max_unprocessed;
   177 |      0 |         _processed_weight = o._processed_weight;
   178 |      0 |         _unprocessed_weight = o._unprocessed_weight;
   179 |      0 |         _processed = std::move(o._processed);
   180 |      0 |         _unprocessed = std::move(o._unprocessed);
   181 |      0 |         _cumulative = std::move(o._cumulative);
   182 |      0 |         _min = o._min;
   183 |      0 |         _max = o._max;
   184 |      0 |         return *this;
   185 |      0 |     }
   186 |        |
   187 |        |     TDigest(TDigest&& o)
   188 |        |             : TDigest(std::move(o._processed), std::move(o._unprocessed), o._compression,
   189 |      0 |                       o._max_unprocessed, o._max_processed) {}
   190 |        |
   191 |     14 |     static inline Index processedSize(Index size, Value compression) noexcept {
   192 |     14 |         return (size == 0) ? static_cast<Index>(2 * std::ceil(compression)) : size;
   193 |     14 |     }
   194 |        |
   195 |     14 |     static inline Index unprocessedSize(Index size, Value compression) noexcept {
   196 |     14 |         return (size == 0) ? static_cast<Index>(8 * std::ceil(compression)) : size;
   197 |     14 |     }
   198 |        |
   199 |        |     // merge in another t-digest
   200 |      1 |     void merge(const TDigest* other) {
   201 |      1 |         std::vector<const TDigest*> others {other};
   202 |      1 |         add(others.cbegin(), others.cend());
   203 |      1 |     }
   204 |        |
   205 |      3 |     const std::vector<Centroid>& processed() const { return _processed; }
   206 |        |
   207 |      0 |     const std::vector<Centroid>& unprocessed() const { return _unprocessed; }
   208 |        |
   209 |      0 |     Index maxUnprocessed() const { return _max_unprocessed; }
   210 |        |
   211 |      0 |     Index maxProcessed() const { return _max_processed; }
   212 |        |
   213 |      1 |     void add(std::vector<const TDigest*> digests) { add(digests.cbegin(), digests.cend()); }
   214 |        |
   215 |        |     // merge in a vector of tdigests in the most efficient manner possible
   216 |        |     // in constant space
   217 |        |     // works for any value of kHighWater
   218 |        |     void add(std::vector<const TDigest*>::const_iterator iter,
   219 |      2 |              std::vector<const TDigest*>::const_iterator end) {
   220 |      2 |         if (iter != end) {
   221 |      2 |             auto size = std::distance(iter, end);
   222 |      2 |             TDigestQueue pq(TDigestComparator {});
   223 |      4 |             for (; iter != end; iter++) {
   224 |      2 |                 pq.push((*iter));
   225 |      2 |             }
   226 |      2 |             std::vector<const TDigest*> batch;
   227 |      2 |             batch.reserve(size);
   228 |        |
   229 |      2 |             size_t totalSize = 0;
   230 |      4 |             while (!pq.empty()) {
   231 |      2 |                 const auto* td = pq.top();
   232 |      2 |                 batch.push_back(td);
   233 |      2 |                 pq.pop();
   234 |      2 |                 totalSize += td->totalSize();
   235 |      2 |                 if (totalSize >= kHighWater || pq.empty()) {
   236 |      2 |                     mergeProcessed(batch);
   237 |      2 |                     mergeUnprocessed(batch);
   238 |      2 |                     processIfNecessary();
   239 |      2 |                     batch.clear();
   240 |      2 |                     totalSize = 0;
   241 |      2 |                 }
   242 |      2 |             }
   243 |      2 |             updateCumulative();
   244 |      2 |         }
   245 |      2 |     }
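The iterator-based add above drains the TDigestQueue smallest-digest-first (the comparator orders by totalSize()) and flushes a batch whenever the accumulated centroid count reaches kHighWater (40000), keeping peak memory bounded no matter how many digests are merged. A hedged usage sketch, under the same build assumptions as the first example:

    #include "util/tdigest.h"  // path assumed

    int main() {
        doris::TDigest a(100), b(100), merged(100);
        for (int i = 0; i < 1000; ++i) {
            a.add(static_cast<doris::Value>(i));
            b.add(static_cast<doris::Value>(i + 1000));
        }
        // merge() is a convenience wrapper around the iterator-based add().
        merged.merge(&a);
        merged.merge(&b);
        merged.compress();
        return merged.totalWeight() == 2000 ? 0 : 1;  // both inputs accounted for
    }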
   246 |        |
   247 |      0 |     Weight processedWeight() const { return _processed_weight; }
   248 |        |
   249 |      0 |     Weight unprocessedWeight() const { return _unprocessed_weight; }
   250 |        |
   251 |     41 |     bool haveUnprocessed() const { return _unprocessed.size() > 0; }
   252 |        |
   253 |      2 |     size_t totalSize() const { return _processed.size() + _unprocessed.size(); }
   254 |        |
   255 |      2 |     long totalWeight() const { return static_cast<long>(_processed_weight + _unprocessed_weight); }
   256 |        |
   257 |        |     // return the cdf on the t-digest
   258 |     11 |     Value cdf(Value x) {
   259 |     11 |         if (haveUnprocessed() || isDirty()) {
   260 |      0 |             process();
   261 |      0 |         }
   262 |     11 |         return cdfProcessed(x);
   263 |     11 |     }
   264 |        |
   265 |  16.2k |     bool isDirty() {
   266 |  16.2k |         return _processed.size() > _max_processed || _unprocessed.size() > _max_unprocessed;
   267 |  16.2k |     }
   268 |        |
   269 |        |     // return the cdf on the processed values
   270 |     11 |     Value cdfProcessed(Value x) const {
   271 |     11 |         VLOG_CRITICAL << "cdf value " << x;
   272 |     11 |         VLOG_CRITICAL << "processed size " << _processed.size();
   273 |     11 |         if (_processed.size() == 0) {
   274 |        |             // no data to examine
   275 |      0 |             VLOG_CRITICAL << "no processed values";
   276 |        |
   277 |      0 |             return 0.0;
   278 |     11 |         } else if (_processed.size() == 1) {
   279 |      0 |             VLOG_CRITICAL << "one processed value "
   280 |      0 |                           << " _min " << _min << " _max " << _max;
   281 |        |             // exactly one centroid, should have _max==_min
   282 |      0 |             auto width = _max - _min;
   283 |      0 |             if (x < _min) {
   284 |      0 |                 return 0.0;
   285 |      0 |             } else if (x > _max) {
   286 |      0 |                 return 1.0;
   287 |      0 |             } else if (x - _min <= width) {
   288 |        |                 // _min and _max are too close together to do any viable interpolation
   289 |      0 |                 return 0.5;
   290 |      0 |             } else {
   291 |        |                 // interpolate if somehow we have weight > 0 and _max != _min
   292 |      0 |                 return (x - _min) / (_max - _min);
   293 |      0 |             }
   294 |     11 |         } else {
   295 |     11 |             auto n = _processed.size();
   296 |     11 |             if (x <= _min) {
   297 |      2 |                 VLOG_CRITICAL << "below _min "
   298 |      2 |                               << " _min " << _min << " x " << x;
   299 |      2 |                 return 0;
   300 |      2 |             }
   301 |        |
   302 |      9 |             if (x >= _max) {
   303 |      1 |                 VLOG_CRITICAL << "above _max "
   304 |      1 |                               << " _max " << _max << " x " << x;
   305 |      1 |                 return 1;
   306 |      1 |             }
   307 |        |
   308 |        |             // check for the left tail
   309 |      8 |             if (x <= mean(0)) {
   310 |      0 |                 VLOG_CRITICAL << "left tail "
   311 |      0 |                               << " _min " << _min << " mean(0) " << mean(0) << " x " << x;
   312 |        |
   313 |        |                 // note that this is different than mean(0) > _min ... this guarantees interpolation works
   314 |      0 |                 if (mean(0) - _min > 0) {
   315 |      0 |                     return static_cast<Value>((x - _min) / (mean(0) - _min) * weight(0) /
   316 |      0 |                                               _processed_weight / 2.0);
   317 |      0 |                 } else {
   318 |      0 |                     return 0;
   319 |      0 |                 }
   320 |      0 |             }
   321 |        |
   322 |        |             // and the right tail
   323 |      8 |             if (x >= mean(n - 1)) {
   324 |      0 |                 VLOG_CRITICAL << "right tail"
   325 |      0 |                               << " _max " << _max << " mean(n - 1) " << mean(n - 1) << " x " << x;
   326 |        |
   327 |      0 |                 if (_max - mean(n - 1) > 0) {
   328 |      0 |                     return static_cast<Value>(1.0 - (_max - x) / (_max - mean(n - 1)) *
   329 |      0 |                                                             weight(n - 1) / _processed_weight /
   330 |      0 |                                                             2.0);
   331 |      0 |                 } else {
   332 |      0 |                     return 1;
   333 |      0 |                 }
   334 |      0 |             }
   335 |        |
   336 |      8 |             CentroidComparator cc;
   337 |      8 |             auto iter =
   338 |      8 |                     std::upper_bound(_processed.cbegin(), _processed.cend(), Centroid(x, 0), cc);
   339 |        |
   340 |      8 |             auto i = std::distance(_processed.cbegin(), iter);
   341 |      8 |             auto z1 = x - (iter - 1)->mean();
   342 |      8 |             auto z2 = (iter)->mean() - x;
   343 |      8 |             DCHECK_LE(0.0, z1);
   344 |      8 |             DCHECK_LE(0.0, z2);
   345 |      8 |             VLOG_CRITICAL << "middle "
   346 |      8 |                           << " z1 " << z1 << " z2 " << z2 << " x " << x;
   347 |        |
   348 |      8 |             return weightedAverage(_cumulative[i - 1], z2, _cumulative[i], z1) / _processed_weight;
   349 |      8 |         }
   350 |     11 |     }
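In the interior branch (lines 336-348), _cumulative[i] holds the rank of the i-th centroid's midpoint, so with z1 and z2 the distances to the two bracketing means, the estimate is a distance-weighted blend of the bracketing midpoint ranks, normalized by the processed weight W. Roughly, for x between centroid means mu_{i-1} and mu_i:

    \[
    \mathrm{cdf}(x) \approx \frac{1}{W}\cdot\frac{C_{i-1}\,z_2 + C_i\,z_1}{z_1 + z_2},
    \qquad z_1 = x - \mu_{i-1},\quad z_2 = \mu_i - x,
    \]

where C_i = _cumulative[i]; weightedAverage additionally clamps the blend between C_{i-1} and C_i.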
   351 |        |
   352 |        |     // this returns a quantile on the t-digest
   353 |     30 |     Value quantile(Value q) {
   354 |     30 |         if (haveUnprocessed() || isDirty()) {
   355 |      4 |             process();
   356 |      4 |         }
   357 |     30 |         return quantileProcessed(q);
   358 |     30 |     }
   359 |        |
   360 |        |     // this returns a quantile on the currently processed values without changing the t-digest
   361 |        |     // the value will not represent the unprocessed values
   362 |     30 |     Value quantileProcessed(Value q) const {
   363 |     30 |         if (q < 0 || q > 1) {
   364 |      0 |             VLOG_CRITICAL << "q should be in [0,1], got " << q;
   365 |      0 |             return NAN;
   366 |      0 |         }
   367 |        |
   368 |     30 |         if (_processed.size() == 0) {
   369 |        |             // no centroids means no data, no way to get a quantile
   370 |      0 |             return NAN;
   371 |     30 |         } else if (_processed.size() == 1) {
   372 |        |             // with one data point, all quantiles lead to Rome
   373 |        |
   374 |      3 |             return mean(0);
   375 |      3 |         }
   376 |        |
   377 |        |         // we know that there are at least two centroids now
   378 |     27 |         auto n = _processed.size();
   379 |        |
   380 |        |         // if values were stored in a sorted array, index would be the offset we are interested in
   381 |     27 |         const auto index = q * _processed_weight;
   382 |        |
   383 |        |         // at the boundaries, we return _min or _max
   384 |     27 |         if (index <= weight(0) / 2.0) {
   385 |      5 |             DCHECK_GT(weight(0), 0);
   386 |      5 |             return static_cast<Value>(_min + 2.0 * index / weight(0) * (mean(0) - _min));
   387 |      5 |         }
   388 |        |
   389 |     22 |         auto iter = std::lower_bound(_cumulative.cbegin(), _cumulative.cend(), index);
   390 |        |
   391 |     22 |         if (iter != _cumulative.cend() && iter != _cumulative.cbegin() &&
   392 |     22 |             iter + 1 != _cumulative.cend()) {
   393 |     18 |             auto i = std::distance(_cumulative.cbegin(), iter);
   394 |     18 |             auto z1 = index - *(iter - 1);
   395 |     18 |             auto z2 = *(iter)-index;
   396 |        |             // VLOG_CRITICAL << "z2 " << z2 << " index " << index << " z1 " << z1;
   397 |     18 |             return weightedAverage(mean(i - 1), z2, mean(i), z1);
   398 |     18 |         }
   399 |        |
   400 |     22 |         DCHECK_LE(index, _processed_weight);
   401 |      4 |         DCHECK_GE(index, _processed_weight - weight(n - 1) / 2.0);
   402 |        |
   403 |      4 |         auto z1 = static_cast<Value>(index - _processed_weight - weight(n - 1) / 2.0);
   404 |      4 |         auto z2 = static_cast<Value>(weight(n - 1) / 2 - z1);
   405 |      4 |         return weightedAverage(mean(n - 1), z1, _max, z2);
   406 |     22 |     }
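A worked case of the interior branch above (hypothetical numbers): let adjacent centroids have means 10 and 20 with cumulative midpoint ranks 30 and 50, and let the target rank be index = 45. Then z1 = 45 - 30 = 15, z2 = 50 - 45 = 5, and

    weightedAverage(10, z2, 20, z1) = (10 * 5 + 20 * 15) / (5 + 15) = 17.5

so the estimate lands 3/4 of the way toward the centroid whose midpoint rank is nearer the target, exactly mirroring where index = 45 sits between ranks 30 and 50.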
   407 |        |
   408 |      0 |     Value compression() const { return _compression; }
   409 |        |
   410 |  6.16k |     void add(Value x) { add(x, 1); }
   411 |        |
   412 |      3 |     void compress() { process(); }
   413 |        |
   414 |        |     // add a single centroid to the unprocessed vector, processing previously unprocessed centroids
   415 |        |     // if our limit has been reached.
   416 |  16.1k |     bool add(Value x, Weight w) {
   417 |  16.1k |         if (std::isnan(x)) {
   418 |      0 |             return false;
   419 |      0 |         }
   420 |  16.1k |         _unprocessed.emplace_back(x, w);
   421 |  16.1k |         _unprocessed_weight += w;
   422 |  16.1k |         processIfNecessary();
   423 |  16.1k |         return true;
   424 |  16.1k |     }
   425 |        |
   426 |        |     void add(std::vector<Centroid>::const_iterator iter,
   427 |      0 |              std::vector<Centroid>::const_iterator end) {
   428 |      0 |         while (iter != end) {
   429 |      0 |             const size_t diff = std::distance(iter, end);
   430 |      0 |             const size_t room = _max_unprocessed - _unprocessed.size();
   431 |      0 |             auto mid = iter + std::min(diff, room);
   432 |      0 |             while (iter != mid) {
   433 |      0 |                 _unprocessed.push_back(*(iter++));
   434 |      0 |             }
   435 |      0 |             if (_unprocessed.size() >= _max_unprocessed) {
   436 |      0 |                 process();
   437 |      0 |             }
   438 |      0 |         }
   439 |      0 |     }
   440 |        |
   441 |      9 |     uint32_t serialized_size() {
   442 |      9 |         return static_cast<uint32_t>(sizeof(uint32_t) + sizeof(Value) * 5 + sizeof(Index) * 2 +
   443 |      9 |                                      sizeof(uint32_t) * 3 + _processed.size() * sizeof(Centroid) +
   444 |      9 |                                      _unprocessed.size() * sizeof(Centroid) +
   445 |      9 |                                      _cumulative.size() * sizeof(Weight));
   446 |      9 |     }
   447 |        |
   448 |      3 |     size_t serialize(uint8_t* writer) {
   449 |      3 |         uint8_t* dst = writer;
   450 |      3 |         uint32_t total_size = serialized_size();
   451 |      3 |         memcpy(writer, &total_size, sizeof(uint32_t));
   452 |      3 |         writer += sizeof(uint32_t);
   453 |      3 |         memcpy(writer, &_compression, sizeof(Value));
   454 |      3 |         writer += sizeof(Value);
   455 |      3 |         memcpy(writer, &_min, sizeof(Value));
   456 |      3 |         writer += sizeof(Value);
   457 |      3 |         memcpy(writer, &_max, sizeof(Value));
   458 |      3 |         writer += sizeof(Value);
   459 |      3 |         memcpy(writer, &_max_processed, sizeof(Index));
   460 |      3 |         writer += sizeof(Index);
   461 |      3 |         memcpy(writer, &_max_unprocessed, sizeof(Index));
   462 |      3 |         writer += sizeof(Index);
   463 |      3 |         memcpy(writer, &_processed_weight, sizeof(Value));
   464 |      3 |         writer += sizeof(Value);
   465 |      3 |         memcpy(writer, &_unprocessed_weight, sizeof(Value));
   466 |      3 |         writer += sizeof(Value);
   467 |        |
   468 |      3 |         auto size = static_cast<uint32_t>(_processed.size());
   469 |      3 |         memcpy(writer, &size, sizeof(uint32_t));
   470 |      3 |         writer += sizeof(uint32_t);
   471 |      3 |         for (int i = 0; i < size; i++) {
   472 |      0 |             memcpy(writer, &_processed[i], sizeof(Centroid));
   473 |      0 |             writer += sizeof(Centroid);
   474 |      0 |         }
   475 |        |
   476 |      3 |         size = static_cast<uint32_t>(_unprocessed.size());
   477 |      3 |         memcpy(writer, &size, sizeof(uint32_t));
   478 |      3 |         writer += sizeof(uint32_t);
   479 |        |         // TODO(weixiang): maybe a single memcpy would be enough here.
   480 |  8.05k |         for (int i = 0; i < size; i++) {
   481 |  8.04k |             memcpy(writer, &_unprocessed[i], sizeof(Centroid));
   482 |  8.04k |             writer += sizeof(Centroid);
   483 |  8.04k |         }
   484 |        |
   485 |      3 |         size = static_cast<uint32_t>(_cumulative.size());
   486 |      3 |         memcpy(writer, &size, sizeof(uint32_t));
   487 |      3 |         writer += sizeof(uint32_t);
   488 |      3 |         for (int i = 0; i < size; i++) {
   489 |      0 |             memcpy(writer, &_cumulative[i], sizeof(Weight));
   490 |      0 |             writer += sizeof(Weight);
   491 |      0 |         }
   492 |      3 |         return writer - dst;
   493 |      3 |     }
   494 |        |
   495 |      1 |     void unserialize(const uint8_t* type_reader) {
   496 |      1 |         uint32_t total_length = 0;
   497 |      1 |         memcpy(&total_length, type_reader, sizeof(uint32_t));
   498 |      1 |         type_reader += sizeof(uint32_t);
   499 |      1 |         memcpy(&_compression, type_reader, sizeof(Value));
   500 |      1 |         type_reader += sizeof(Value);
   501 |      1 |         memcpy(&_min, type_reader, sizeof(Value));
   502 |      1 |         type_reader += sizeof(Value);
   503 |      1 |         memcpy(&_max, type_reader, sizeof(Value));
   504 |      1 |         type_reader += sizeof(Value);
   505 |        |
   506 |      1 |         memcpy(&_max_processed, type_reader, sizeof(Index));
   507 |      1 |         type_reader += sizeof(Index);
   508 |      1 |         memcpy(&_max_unprocessed, type_reader, sizeof(Index));
   509 |      1 |         type_reader += sizeof(Index);
   510 |      1 |         memcpy(&_processed_weight, type_reader, sizeof(Value));
   511 |      1 |         type_reader += sizeof(Value);
   512 |      1 |         memcpy(&_unprocessed_weight, type_reader, sizeof(Value));
   513 |      1 |         type_reader += sizeof(Value);
   514 |        |
   515 |      1 |         uint32_t size;
   516 |      1 |         memcpy(&size, type_reader, sizeof(uint32_t));
   517 |      1 |         type_reader += sizeof(uint32_t);
   518 |      1 |         _processed.resize(size);
   519 |      1 |         for (int i = 0; i < size; i++) {
   520 |      0 |             memcpy(&_processed[i], type_reader, sizeof(Centroid));
   521 |      0 |             type_reader += sizeof(Centroid);
   522 |      0 |         }
   523 |      1 |         memcpy(&size, type_reader, sizeof(uint32_t));
   524 |      1 |         type_reader += sizeof(uint32_t);
   525 |      1 |         _unprocessed.resize(size);
   526 |  2.05k |         for (int i = 0; i < size; i++) {
   527 |  2.04k |             memcpy(&_unprocessed[i], type_reader, sizeof(Centroid));
   528 |  2.04k |             type_reader += sizeof(Centroid);
   529 |  2.04k |         }
   530 |      1 |         memcpy(&size, type_reader, sizeof(uint32_t));
   531 |      1 |         type_reader += sizeof(uint32_t);
   532 |      1 |         _cumulative.resize(size);
   533 |      1 |         for (int i = 0; i < size; i++) {
   534 |      0 |             memcpy(&_cumulative[i], type_reader, sizeof(Weight));
   535 |      0 |             type_reader += sizeof(Weight);
   536 |      0 |         }
   537 |      1 |     }
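serialize/unserialize use a flat layout that matches serialized_size() above: a uint32 total size, then _compression, _min, _max, the two Index limits, the two weights, and finally three length-prefixed arrays (processed centroids, unprocessed centroids, cumulative weights). A hedged round-trip sketch, under the same build assumptions as the earlier examples:

    #include "util/tdigest.h"  // path assumed

    #include <cstdint>
    #include <vector>

    int main() {
        doris::TDigest src(100);
        for (int i = 0; i < 512; ++i) {
            src.add(static_cast<doris::Value>(i));
        }
        std::vector<uint8_t> buf(src.serialized_size());
        size_t written = src.serialize(buf.data());  // returns bytes written

        doris::TDigest dst(100);
        dst.unserialize(buf.data());
        // The restored digest should carry the same total weight as the source.
        return (written == buf.size() && dst.totalWeight() == src.totalWeight()) ? 0 : 1;
    }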
   538 |        |
   539 |        | private:
   540 |        |     Value _compression;
   541 |        |
   542 |        |     Value _min = std::numeric_limits<Value>::max();
   543 |        |
   544 |        |     Value _max = std::numeric_limits<Value>::min();
   545 |        |
   546 |        |     Index _max_processed;
   547 |        |
   548 |        |     Index _max_unprocessed;
   549 |        |
   550 |        |     Value _processed_weight = 0.0;
   551 |        |
   552 |        |     Value _unprocessed_weight = 0.0;
   553 |        |
   554 |        |     std::vector<Centroid> _processed;
   555 |        |
   556 |        |     std::vector<Centroid> _unprocessed;
   557 |        |
   558 |        |     std::vector<Weight> _cumulative;
   559 |        |
   560 |        |     // return mean of i-th centroid
   561 |     64 |     Value mean(int64_t i) const noexcept { return _processed[i].mean(); }
   562 |        |
   563 |        |     // return weight of i-th centroid
   564 |  2.56k |     Weight weight(int64_t i) const noexcept { return _processed[i].weight(); }
   565 |        |
   566 |        |     // append all unprocessed centroids into current unprocessed vector
   567 |      2 |     void mergeUnprocessed(const std::vector<const TDigest*>& tdigests) {
   568 |      2 |         if (tdigests.size() == 0) {
   569 |      0 |             return;
   570 |      0 |         }
   571 |        |
   572 |      2 |         size_t total = _unprocessed.size();
   573 |      2 |         for (const auto& td : tdigests) {
   574 |      2 |             total += td->_unprocessed.size();
   575 |      2 |         }
   576 |        |
   577 |      2 |         _unprocessed.reserve(total);
   578 |      2 |         for (const auto& td : tdigests) {
   579 |      2 |             _unprocessed.insert(_unprocessed.end(), td->_unprocessed.cbegin(),
   580 |      2 |                                 td->_unprocessed.cend());
   581 |      2 |             _unprocessed_weight += td->_unprocessed_weight;
   582 |      2 |         }
   583 |      2 |     }
   584 |        |
   585 |        |     // merge all processed centroids together into a single sorted vector
   586 |      2 |     void mergeProcessed(const std::vector<const TDigest*>& tdigests) {
   587 |      2 |         if (tdigests.size() == 0) {
   588 |      0 |             return;
   589 |      0 |         }
   590 |        |
   591 |      2 |         size_t total = 0;
   592 |      2 |         CentroidListQueue pq(CentroidListComparator {});
   593 |      2 |         for (const auto& td : tdigests) {
   594 |      2 |             const auto& sorted = td->_processed;
   595 |      2 |             auto size = sorted.size();
   596 |      2 |             if (size > 0) {
   597 |      1 |                 pq.push(CentroidList(sorted));
   598 |      1 |                 total += size;
   599 |      1 |                 _processed_weight += td->_processed_weight;
   600 |      1 |             }
   601 |      2 |         }
   602 |      2 |         if (total == 0) {
   603 |      1 |             return;
   604 |      1 |         }
   605 |        |
   606 |      1 |         if (_processed.size() > 0) {
   607 |      0 |             pq.push(CentroidList(_processed));
   608 |      0 |             total += _processed.size();
   609 |      0 |         }
   610 |        |
   611 |      1 |         std::vector<Centroid> sorted;
   612 |      1 |         VLOG_CRITICAL << "total " << total;
   613 |      1 |         sorted.reserve(total);
   614 |        |
   615 |    101 |         while (!pq.empty()) {
   616 |    100 |             auto best = pq.top();
   617 |    100 |             pq.pop();
   618 |    100 |             sorted.push_back(*(best.iter));
   619 |    100 |             if (best.advance()) {
   620 |     99 |                 pq.push(best);
   621 |     99 |             }
   622 |    100 |         }
   623 |      1 |         _processed = std::move(sorted);
   624 |      1 |         if (_processed.size() > 0) {
   625 |      1 |             _min = std::min(_min, _processed[0].mean());
   626 |      1 |             _max = std::max(_max, (_processed.cend() - 1)->mean());
   627 |      1 |         }
   628 |      1 |     }
   629 |        |
   630 |  16.1k |     void processIfNecessary() {
   631 |  16.1k |         if (isDirty()) {
   632 |      1 |             process();
   633 |      1 |         }
   634 |  16.1k |     }
   635 |        |
   636 |     10 |     void updateCumulative() {
   637 |     10 |         const auto n = _processed.size();
   638 |     10 |         _cumulative.clear();
   639 |     10 |         _cumulative.reserve(n + 1);
   640 |     10 |         Weight previous = 0.0;
   641 |  2.52k |         for (Index i = 0; i < n; i++) {
   642 |  2.51k |             Weight current = weight(i);
   643 |  2.51k |             auto halfCurrent = static_cast<Weight>(current / 2.0);
   644 |  2.51k |             _cumulative.push_back(previous + halfCurrent);
   645 |  2.51k |             previous = previous + current;
   646 |  2.51k |         }
   647 |     10 |         _cumulative.push_back(previous);
   648 |     10 |     }
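updateCumulative records, for each processed centroid, the rank of its midpoint: the total weight before it plus half its own weight, with one trailing entry holding the full weight. For hypothetical centroid weights {2, 4, 6} this produces

    _cumulative = {0 + 2/2, 2 + 4/2, 6 + 6/2, 12} = {1, 4, 9, 12}

and these midpoint ranks are exactly what cdfProcessed and quantileProcessed interpolate between.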
   649 |        |
   650 |        |     // merges _unprocessed centroids and _processed centroids together and processes them
   651 |        |     // when complete, _unprocessed will be empty and _processed will have at most _max_processed centroids
   652 |      8 |     void process() {
   653 |      8 |         CentroidComparator cc;
   654 |        |         // Benchmarked with: select percentile_approx(lo_orderkey, 0.5) from lineorder;
   655 |        |         // both pdqsort and RadixSort were tested: pdqsort is faster when the elements are
   656 |        |         // struct Centroid, while RadixSort wins for plain types like int/float in a std::vector<T>.
   657 |      8 |         pdqsort(_unprocessed.begin(), _unprocessed.end(), cc);
   658 |      8 |         auto count = _unprocessed.size();
   659 |      8 |         _unprocessed.insert(_unprocessed.end(), _processed.cbegin(), _processed.cend());
   660 |      8 |         std::inplace_merge(_unprocessed.begin(), _unprocessed.begin() + count, _unprocessed.end(),
   661 |      8 |                            cc);
   662 |        |
   663 |      8 |         _processed_weight += _unprocessed_weight;
   664 |      8 |         _unprocessed_weight = 0;
   665 |      8 |         _processed.clear();
   666 |        |
   667 |      8 |         _processed.push_back(_unprocessed[0]);
   668 |      8 |         Weight wSoFar = _unprocessed[0].weight();
   669 |      8 |         Weight wLimit = _processed_weight * integratedQ(1.0);
   670 |        |
   671 |      8 |         auto end = _unprocessed.end();
   672 |  12.2k |         for (auto iter = _unprocessed.cbegin() + 1; iter < end; iter++) {
   673 |  12.2k |             const auto& centroid = *iter;
   674 |  12.2k |             Weight projectedW = wSoFar + centroid.weight();
   675 |  12.2k |             if (projectedW <= wLimit) {
   676 |  9.79k |                 wSoFar = projectedW;
   677 |  9.79k |                 (_processed.end() - 1)->add(centroid);
   678 |  9.79k |             } else {
   679 |  2.40k |                 auto k1 = integratedLocation(wSoFar / _processed_weight);
   680 |  2.40k |                 wLimit = _processed_weight * integratedQ(static_cast<Value>(k1 + 1.0));
   681 |  2.40k |                 wSoFar += centroid.weight();
   682 |  2.40k |                 _processed.emplace_back(centroid);
   683 |  2.40k |             }
   684 |  12.2k |         }
   685 |      8 |         _unprocessed.clear();
   686 |      8 |         _min = std::min(_min, _processed[0].mean());
   687 |      8 |         VLOG_CRITICAL << "new _min " << _min;
   688 |      8 |         _max = std::max(_max, (_processed.cend() - 1)->mean());
   689 |      8 |         VLOG_CRITICAL << "new _max " << _max;
   690 |      8 |         updateCumulative();
   691 |      8 |     }
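process() sorts the buffer, merges it with the already-processed centroids, then makes one greedy pass: incoming centroids are folded into the current output centroid while the running weight stays within wLimit; otherwise a new output centroid starts and wLimit is advanced by one unit of centroid scale (lines 679-680). Reading k as integratedLocation and q as integratedQ (both defined below), with total weight W and an output centroid that started at running weight w_start, the fold condition is roughly

    \[
    w_{\mathrm{sofar}} + w_c \;\le\; W \cdot q\!\left(k\!\left(\tfrac{w_{\mathrm{start}}}{W}\right) + 1\right),
    \]

which caps each output centroid at one unit of the asin/sin scale, keeping centroids small near the tails and large near the median.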
   692 |        |
   693 |      0 |     size_t checkWeights(const std::vector<Centroid>& sorted, Value total) {
   694 |      0 |         size_t badWeight = 0;
   695 |      0 |         auto k1 = 0.0;
   696 |      0 |         auto q = 0.0;
   697 |      0 |         for (auto iter = sorted.cbegin(); iter != sorted.cend(); iter++) {
   698 |      0 |             auto w = iter->weight();
   699 |      0 |             auto dq = w / total;
   700 |      0 |             auto k2 = integratedLocation(static_cast<Value>(q + dq));
   701 |      0 |             if (k2 - k1 > 1 && w != 1) {
   702 |      0 |                 VLOG_CRITICAL << "Oversize centroid at " << std::distance(sorted.cbegin(), iter)
   703 |      0 |                               << " k1 " << k1 << " k2 " << k2 << " dk " << (k2 - k1) << " w " << w
   704 |      0 |                               << " q " << q;
   705 |      0 |                 badWeight++;
   706 |      0 |             }
   707 |      0 |             if (k2 - k1 > 1.5 && w != 1) {
   708 |      0 |                 VLOG_CRITICAL << "Egregiously Oversize centroid at "
   709 |      0 |                               << std::distance(sorted.cbegin(), iter) << " k1 " << k1 << " k2 "
   710 |      0 |                               << k2 << " dk " << (k2 - k1) << " w " << w << " q " << q;
   711 |      0 |                 badWeight++;
   712 |      0 |             }
   713 |      0 |             q += dq;
   714 |      0 |             k1 = k2;
   715 |      0 |         }
   716 |      0 |
   717 |      0 |         return badWeight;
   718 |      0 |     }
   719 |        |
   720 |        |     /**
   721 |        |     * Converts a quantile into a centroid scale value.  The centroid scale is nominally
   722 |        |     * the number k of the centroid that a quantile point q should belong to.  Due to
   723 |        |     * round-offs, however, we can't align things perfectly without splitting points
   724 |        |     * and centroids.  We don't want to do that, so we have to allow for offsets.
   725 |        |     * In the end, the criterion is that any quantile range that spans a centroid
   726 |        |     * scale range more than one should be split across more than one centroid if
   727 |        |     * possible.  This won't be possible if the quantile range refers to a single point
   728 |        |     * or an already existing centroid.
   729 |        |     * <p/>
   730 |        |     * This mapping is steep near q=0 or q=1 so each centroid there will correspond to
   731 |        |     * less q range.  Near q=0.5, the mapping is flatter so that centroids there will
   732 |        |     * represent a larger chunk of quantiles.
   733 |        |     *
   734 |        |     * @param q The quantile scale value to be mapped.
   735 |        |     * @return The centroid scale value corresponding to q.
   736 |        |     */
   737 |  2.40k |     Value integratedLocation(Value q) const {
   738 |  2.40k |         return static_cast<Value>(_compression * (std::asin(2.0 * q - 1.0) + M_PI / 2) / M_PI);
   739 |  2.40k |     }
   740 |        |
   741 |  2.41k |     Value integratedQ(Value k) const {
   742 |  2.41k |         return static_cast<Value>(
   743 |  2.41k |                 (std::sin(std::min(k, _compression) * M_PI / _compression - M_PI / 2) + 1) / 2);
   744 |  2.41k |     }
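integratedLocation and integratedQ are the scale function and its inverse, with delta = _compression:

    \[
    k(q) = \frac{\delta}{\pi}\left(\arcsin(2q - 1) + \frac{\pi}{2}\right),
    \qquad
    q(k) = \frac{\sin\!\left(\min(k,\delta)\,\pi/\delta - \pi/2\right) + 1}{2}
    \]

k maps quantiles onto [0, delta] and is steep near q = 0 and q = 1, so tail centroids absorb little weight and extreme quantiles stay accurate; this appears to be a shifted form of the arcsine (k1) scale from the Dunning and Ertl paper cited at the top of the file.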
   745 |        |
   746 |        |     /**
   747 |        |      * Same as {@link #weightedAverageSorted(Value, Value, Value, Value)} but flips
   748 |        |      * the order of the variables if <code>x1</code> is greater than
   749 |        |      * <code>x2</code>.
   750 |        |     */
   751 |     30 |     static Value weightedAverage(Value x1, Value w1, Value x2, Value w2) {
   752 |     30 |         return (x1 <= x2) ? weightedAverageSorted(x1, w1, x2, w2)
   753 |     30 |                           : weightedAverageSorted(x2, w2, x1, w1);
   754 |     30 |     }
   755 |        |
   756 |        |     /**
   757 |        |     * Compute the weighted average between <code>x1</code> with a weight of
   758 |        |     * <code>w1</code> and <code>x2</code> with a weight of <code>w2</code>.
   759 |        |     * This expects <code>x1</code> to be less than or equal to <code>x2</code>
   760 |        |     * and is guaranteed to return a number between <code>x1</code> and
   761 |        |     * <code>x2</code>.
   762 |        |     */
   763 |     30 |     static Value weightedAverageSorted(Value x1, Value w1, Value x2, Value w2) {
   764 |     30 |         DCHECK_LE(x1, x2);
   765 |     30 |         const Value x = (x1 * w1 + x2 * w2) / (w1 + w2);
   766 |     30 |         return std::max(x1, std::min(x, x2));
   767 |     30 |     }
   768 |        |
   769 |      0 |     static Value interpolate(Value x, Value x0, Value x1) { return (x - x0) / (x1 - x0); }
   770 |        |
   771 |        |     /**
   772 |        |     * Computes an interpolated value of a quantile that is between two centroids.
   773 |        |     *
   774 |        |     * Index is the quantile desired multiplied by the total number of samples - 1.
   775 |        |     *
   776 |        |     * @param index              Denormalized quantile desired
   777 |        |     * @param previousIndex      The denormalized quantile corresponding to the center of the previous centroid.
   778 |        |     * @param nextIndex          The denormalized quantile corresponding to the center of the following centroid.
   779 |        |     * @param previousMean       The mean of the previous centroid.
   780 |        |     * @param nextMean           The mean of the following centroid.
   781 |        |     * @return  The interpolated mean.
   782 |        |     */
   783 |        |     static Value quantile(Value index, Value previousIndex, Value nextIndex, Value previousMean,
   784 |      0 |                           Value nextMean) {
   785 |      0 |         const auto delta = nextIndex - previousIndex;
   786 |      0 |         const auto previousWeight = (nextIndex - index) / delta;
   787 |      0 |         const auto nextWeight = (index - previousIndex) / delta;
   788 |      0 |         return previousMean * previousWeight + nextMean * nextWeight;
   789 |      0 |     }
   790 |        | };
   791 |        | } // namespace doris