Line | Count | Source |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | |
18 | | /* |
19 | | * Licensed to Derrick R. Burns under one or more |
20 | | * contributor license agreements. See the NOTICES file distributed with |
21 | | * this work for additional information regarding copyright ownership. |
22 | | * The ASF licenses this file to You under the Apache License, Version 2.0 |
23 | | * (the "License"); you may not use this file except in compliance with |
24 | | * the License. You may obtain a copy of the License at |
25 | | * |
26 | | * http://www.apache.org/licenses/LICENSE-2.0 |
27 | | * |
28 | | * Unless required by applicable law or agreed to in writing, software |
29 | | * distributed under the License is distributed on an "AS IS" BASIS, |
30 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
31 | | * See the License for the specific language governing permissions and |
32 | | * limitations under the License. |
33 | | */ |
34 | | |
35 | | // T-Digest : Percentile and Quantile Estimation of Big Data |
36 | | // A new data structure for accurate on-line accumulation of rank-based statistics |
37 | | // such as quantiles and trimmed means. |
38 | | // See original paper: "Computing extremely accurate quantiles using t-digest" |
39 | | // by Ted Dunning and Otmar Ertl for more details |
40 | | // https://github.com/tdunning/t-digest/blob/07b8f2ca2be8d0a9f04df2feadad5ddc1bb73c88/docs/t-digest-paper/histo.pdf. |
41 | | // https://github.com/derrickburns/tdigest |
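| | // |
| | // Illustrative usage sketch (not part of this header); it only uses the API |
| | // declared below, and `samples` stands in for any range of float values: |
| | //   doris::TDigest digest(200);             // compression factor |
| | //   for (float v : samples) digest.add(v);  // buffer incoming values |
| | //   digest.compress();                      // fold the buffer into centroids |
| | //   float median = digest.quantile(0.5);    // rank-based estimate |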
42 | | |
43 | | #pragma once |
44 | | |
45 | | #include <pdqsort.h> |
46 | | |
47 | | #include <algorithm> |
48 | | #include <cfloat> |
49 | | #include <cmath> |
50 | | #include <iostream> |
51 | | #include <memory> |
52 | | #include <queue> |
53 | | #include <utility> |
54 | | #include <vector> |
55 | | |
56 | | #include "common/factory_creator.h" |
57 | | #include "common/logging.h" |
58 | | |
59 | | namespace doris { |
60 | | |
61 | | using Value = float; |
62 | | using Weight = float; |
63 | | using Index = size_t; |
64 | | |
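| | // Batch-size threshold (in centroids) used by TDigest::add(iter, end) below: |
| | // queued digests are merged once a batch reaches this many centroids. |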
65 | | const size_t kHighWater = 40000; |
66 | | |
67 | | class Centroid { |
68 | | public: |
69 | 17.5k | Centroid() : Centroid(0.0, 0.0) {} |
70 | | |
71 | 47.6k | Centroid(Value mean, Weight weight) : _mean(mean), _weight(weight) {} |
72 | | |
73 | 518k | Value mean() const noexcept { return _mean; } |
74 | | |
75 | 61.6k | Weight weight() const noexcept { return _weight; } |
76 | | |
77 | 2.73k | Value& mean() noexcept { return _mean; } |
78 | | |
79 | 1.53k | Weight& weight() noexcept { return _weight; } |
80 | | |
81 | 23.4k | void add(const Centroid& c) { |
82 | 23.4k | DCHECK_GT(c._weight, 0); |
83 | 23.4k | if (_weight != 0.0) { |
84 | 23.4k | _weight += c._weight; |
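| | // _weight now already includes c._weight, so this is the usual incremental |
| | // weighted-mean update: mean += w_c * (mean_c - mean) / w_total. |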
85 | 23.4k | _mean += c._weight * (c._mean - _mean) / _weight; |
86 | 23.4k | } else { |
87 | 0 | _weight = c._weight; |
88 | 0 | _mean = c._mean; |
89 | 0 | } |
90 | 23.4k | } |
91 | | |
92 | | private: |
93 | | Value _mean = 0; |
94 | | Weight _weight = 0; |
95 | | }; |
96 | | |
97 | | struct CentroidList { |
98 | 1 | CentroidList(const std::vector<Centroid>& s) : iter(s.cbegin()), end(s.cend()) {} |
99 | | std::vector<Centroid>::const_iterator iter; |
100 | | std::vector<Centroid>::const_iterator end; |
101 | | |
102 | 100 | bool advance() { return ++iter != end; } |
103 | | }; |
104 | | |
105 | | class CentroidListComparator { |
106 | | public: |
107 | | CentroidListComparator() = default; |
108 | | |
109 | 0 | bool operator()(const CentroidList& left, const CentroidList& right) const { |
110 | 0 | return left.iter->mean() > right.iter->mean(); |
111 | 0 | } |
112 | | }; |
113 | | |
114 | | using CentroidListQueue = |
115 | | std::priority_queue<CentroidList, std::vector<CentroidList>, CentroidListComparator>; |
116 | | |
117 | | struct CentroidComparator { |
118 | 258k | bool operator()(const Centroid& a, const Centroid& b) const { return a.mean() < b.mean(); } |
119 | | }; |
120 | | |
121 | | class TDigest { |
122 | | ENABLE_FACTORY_CREATOR(TDigest); |
123 | | |
124 | | class TDigestComparator { |
125 | | public: |
126 | | TDigestComparator() = default; |
127 | | |
128 | 0 | bool operator()(const TDigest* left, const TDigest* right) const { |
129 | 0 | return left->totalSize() > right->totalSize(); |
130 | 0 | } |
131 | | }; |
132 | | using TDigestQueue = |
133 | | std::priority_queue<const TDigest*, std::vector<const TDigest*>, TDigestComparator>; |
134 | | |
135 | | public: |
136 | 0 | TDigest() : TDigest(10000) {} |
137 | | |
138 | 1.10k | explicit TDigest(Value compression) : TDigest(compression, 0) {} |
139 | | |
140 | 1.10k | TDigest(Value compression, Index bufferSize) : TDigest(compression, bufferSize, 0) {} |
141 | | |
142 | | TDigest(Value compression, Index unmergedSize, Index mergedSize) |
143 | 1.10k | : _compression(compression), |
144 | 1.10k | _max_processed(processedSize(mergedSize, compression)), |
145 | 1.10k | _max_unprocessed(unprocessedSize(unmergedSize, compression)) { |
146 | 1.10k | _processed.reserve(_max_processed); |
147 | 1.10k | _unprocessed.reserve(_max_unprocessed + 1); |
148 | 1.10k | } |
149 | | |
150 | | TDigest(std::vector<Centroid>&& processed, std::vector<Centroid>&& unprocessed, |
151 | | Value compression, Index unmergedSize, Index mergedSize) |
152 | 0 | : TDigest(compression, unmergedSize, mergedSize) { |
153 | 0 | _processed = std::move(processed); |
154 | 0 | _unprocessed = std::move(unprocessed); |
155 | 0 | |
156 | 0 | _processed_weight = weight(_processed); |
157 | 0 | _unprocessed_weight = weight(_unprocessed); |
158 | 0 | if (_processed.size() > 0) { |
159 | 0 | _min = std::min(_min, _processed[0].mean()); |
160 | 0 | _max = std::max(_max, (_processed.cend() - 1)->mean()); |
161 | 0 | } |
162 | 0 | updateCumulative(); |
163 | 0 | } |
164 | | |
165 | 0 | static Weight weight(std::vector<Centroid>& centroids) noexcept { |
166 | 0 | Weight w = 0.0; |
167 | 0 | for (auto centroid : centroids) { |
168 | 0 | w += centroid.weight(); |
169 | 0 | } |
170 | 0 | return w; |
171 | 0 | } |
172 | | |
173 | 0 | TDigest& operator=(TDigest&& o) { |
174 | 0 | _compression = o._compression; |
175 | 0 | _max_processed = o._max_processed; |
176 | 0 | _max_unprocessed = o._max_unprocessed; |
177 | 0 | _processed_weight = o._processed_weight; |
178 | 0 | _unprocessed_weight = o._unprocessed_weight; |
179 | 0 | _processed = std::move(o._processed); |
180 | 0 | _unprocessed = std::move(o._unprocessed); |
181 | 0 | _cumulative = std::move(o._cumulative); |
182 | 0 | _min = o._min; |
183 | 0 | _max = o._max; |
184 | 0 | return *this; |
185 | 0 | } |
186 | | |
187 | | TDigest(TDigest&& o) |
188 | | : TDigest(std::move(o._processed), std::move(o._unprocessed), o._compression, |
189 | 0 | o._max_unprocessed, o._max_processed) {} |
190 | | |
191 | 1.10k | static inline Index processedSize(Index size, Value compression) noexcept { |
192 | 1.10k | return (size == 0) ? static_cast<Index>(2 * std::ceil(compression)) : size; |
193 | 1.10k | } |
194 | | |
195 | 1.10k | static inline Index unprocessedSize(Index size, Value compression) noexcept { |
196 | 1.10k | return (size == 0) ? static_cast<Index>(8 * std::ceil(compression)) : size; |
197 | 1.10k | } |
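| | // Sizing example for the defaults above (size == 0): compression = 200 keeps at |
| | // most 2 * 200 = 400 processed centroids and buffers up to 8 * 200 = 1600 |
| | // unprocessed points before a merge is forced. |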
198 | | |
199 | | // merge in another t-digest |
200 | 315 | void merge(const TDigest* other) { |
201 | 315 | std::vector<const TDigest*> others {other}; |
202 | 315 | add(others.cbegin(), others.cend()); |
203 | 315 | } |
204 | | |
205 | | const std::vector<Centroid>& processed() const { return _processed; } |
206 | | |
207 | 0 | const std::vector<Centroid>& unprocessed() const { return _unprocessed; } |
208 | | |
209 | 0 | Index maxUnprocessed() const { return _max_unprocessed; } |
210 | | |
211 | 0 | Index maxProcessed() const { return _max_processed; } |
212 | | |
213 | | void add(std::vector<const TDigest*> digests) { add(digests.cbegin(), digests.cend()); } |
214 | | |
215 | | // merge in a vector of tdigests in the most efficient manner possible |
216 | | // in constant space |
217 | | // works for any value of kHighWater |
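| | // digests are drained from a min-heap ordered by totalSize() and merged in |
| | // batches of roughly kHighWater centroids, so peak memory stays bounded |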
218 | | void add(std::vector<const TDigest*>::const_iterator iter, |
219 | 316 | std::vector<const TDigest*>::const_iterator end) { |
220 | 316 | if (iter != end) { |
221 | 316 | auto size = std::distance(iter, end); |
222 | 316 | TDigestQueue pq(TDigestComparator {}); |
223 | 632 | for (; iter != end; iter++) { |
224 | 316 | pq.push((*iter)); |
225 | 316 | } |
226 | 316 | std::vector<const TDigest*> batch; |
227 | 316 | batch.reserve(size); |
228 | | |
229 | 316 | size_t totalSize = 0; |
230 | 632 | while (!pq.empty()) { |
231 | 316 | const auto* td = pq.top(); |
232 | 316 | batch.push_back(td); |
233 | 316 | pq.pop(); |
234 | 316 | totalSize += td->totalSize(); |
235 | 316 | if (totalSize >= kHighWater || pq.empty()) { |
236 | 316 | mergeProcessed(batch); |
237 | 316 | mergeUnprocessed(batch); |
238 | 316 | processIfNecessary(); |
239 | 316 | batch.clear(); |
240 | 316 | totalSize = 0; |
241 | 316 | } |
242 | 316 | } |
243 | 316 | updateCumulative(); |
244 | 316 | } |
245 | 316 | } |
246 | | |
247 | 0 | Weight processedWeight() const { return _processed_weight; } |
248 | | |
249 | 0 | Weight unprocessedWeight() const { return _unprocessed_weight; } |
250 | | |
251 | 407 | bool haveUnprocessed() const { return _unprocessed.size() > 0; } |
252 | | |
253 | 316 | size_t totalSize() const { return _processed.size() + _unprocessed.size(); } |
254 | | |
255 | | long totalWeight() const { return static_cast<long>(_processed_weight + _unprocessed_weight); } |
256 | | |
257 | | // return the cdf on the t-digest |
258 | | Value cdf(Value x) { |
259 | | if (haveUnprocessed() || isDirty()) { |
260 | | process(); |
261 | | } |
262 | | return cdfProcessed(x); |
263 | | } |
264 | | |
265 | 30.4k | bool isDirty() { |
266 | 30.4k | return _processed.size() > _max_processed || _unprocessed.size() > _max_unprocessed; |
267 | 30.4k | } |
268 | | |
269 | | // return the cdf on the processed values |
270 | 0 | Value cdfProcessed(Value x) const { |
271 | 0 | VLOG_CRITICAL << "cdf value " << x; |
272 | 0 | VLOG_CRITICAL << "processed size " << _processed.size(); |
273 | 0 | if (_processed.size() == 0) { |
274 | 0 | // no data to examine |
275 | 0 | VLOG_CRITICAL << "no processed values"; |
276 | 0 |
|
277 | 0 | return 0.0; |
278 | 0 | } else if (_processed.size() == 1) { |
279 | 0 | VLOG_CRITICAL << "one processed value " |
280 | 0 | << " _min " << _min << " _max " << _max; |
281 | 0 | // exactly one centroid, should have _max==_min |
282 | 0 | auto width = _max - _min; |
283 | 0 | if (x < _min) { |
284 | 0 | return 0.0; |
285 | 0 | } else if (x > _max) { |
286 | 0 | return 1.0; |
287 | 0 | } else if (x - _min <= width) { |
288 | 0 | // _min and _max are too close together to do any viable interpolation |
289 | 0 | return 0.5; |
290 | 0 | } else { |
291 | 0 | // interpolate if somehow we have weight > 0 and _max != _min |
292 | 0 | return (x - _min) / (_max - _min); |
293 | 0 | } |
294 | 0 | } else { |
295 | 0 | auto n = _processed.size(); |
296 | 0 | if (x <= _min) { |
297 | 0 | VLOG_CRITICAL << "below _min " |
298 | 0 | << " _min " << _min << " x " << x; |
299 | 0 | return 0; |
300 | 0 | } |
301 | 0 | |
302 | 0 | if (x >= _max) { |
303 | 0 | VLOG_CRITICAL << "above _max " |
304 | 0 | << " _max " << _max << " x " << x; |
305 | 0 | return 1; |
306 | 0 | } |
307 | 0 | |
308 | 0 | // check for the left tail |
309 | 0 | if (x <= mean(0)) { |
310 | 0 | VLOG_CRITICAL << "left tail " |
311 | 0 | << " _min " << _min << " mean(0) " << mean(0) << " x " << x; |
312 | 0 | |
313 | 0 | // note that this is different than mean(0) > _min ... this guarantees interpolation works |
314 | 0 | if (mean(0) - _min > 0) { |
315 | 0 | return static_cast<Value>((x - _min) / (mean(0) - _min) * weight(0) / |
316 | 0 | _processed_weight / 2.0); |
317 | 0 | } else { |
318 | 0 | return 0; |
319 | 0 | } |
320 | 0 | } |
321 | 0 | |
322 | 0 | // and the right tail |
323 | 0 | if (x >= mean(n - 1)) { |
324 | 0 | VLOG_CRITICAL << "right tail" |
325 | 0 | << " _max " << _max << " mean(n - 1) " << mean(n - 1) << " x " << x; |
326 | 0 | |
327 | 0 | if (_max - mean(n - 1) > 0) { |
328 | 0 | return static_cast<Value>(1.0 - (_max - x) / (_max - mean(n - 1)) * |
329 | 0 | weight(n - 1) / _processed_weight / |
330 | 0 | 2.0); |
331 | 0 | } else { |
332 | 0 | return 1; |
333 | 0 | } |
334 | 0 | } |
335 | 0 | |
336 | 0 | CentroidComparator cc; |
337 | 0 | auto iter = |
338 | 0 | std::upper_bound(_processed.cbegin(), _processed.cend(), Centroid(x, 0), cc); |
339 | 0 | |
340 | 0 | auto i = std::distance(_processed.cbegin(), iter); |
341 | 0 | auto z1 = x - (iter - 1)->mean(); |
342 | 0 | auto z2 = (iter)->mean() - x; |
343 | 0 | DCHECK_LE(0.0, z1); |
344 | 0 | DCHECK_LE(0.0, z2); |
345 | 0 | VLOG_CRITICAL << "middle " |
346 | 0 | << " z1 " << z1 << " z2 " << z2 << " x " << x; |
347 | 0 | |
348 | 0 | return weightedAverage(_cumulative[i - 1], z2, _cumulative[i], z1) / _processed_weight; |
349 | 0 | } |
350 | 0 | } |
351 | | |
352 | | // this returns a quantile on the t-digest |
353 | 396 | Value quantile(Value q) { |
354 | 396 | if (haveUnprocessed() || isDirty()) { |
355 | 330 | process(); |
356 | 330 | } |
357 | 396 | return quantileProcessed(q); |
358 | 396 | } |
359 | | |
360 | | // this returns a quantile on the currently processed values without changing the t-digest |
361 | | // the value will not represent the unprocessed values |
362 | 396 | Value quantileProcessed(Value q) const { |
363 | 396 | if (q < 0 || q > 1) { |
364 | 0 | VLOG_CRITICAL << "q should be in [0,1], got " << q; |
365 | 0 | return NAN; |
366 | 0 | } |
367 | | |
368 | 396 | if (_processed.size() == 0) { |
369 | | // no processed centroids means no data, no way to get a quantile |
370 | 0 | return NAN; |
371 | 396 | } else if (_processed.size() == 1) { |
372 | | // with one data point, all quantiles lead to Rome |
373 | | |
374 | 133 | return mean(0); |
375 | 133 | } |
376 | | |
377 | | // we know that there are at least two centroids now |
378 | 263 | auto n = _processed.size(); |
379 | | |
380 | | // if values were stored in a sorted array, index would be the offset we are interested in |
381 | 263 | const auto index = q * _processed_weight; |
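| | // e.g. q = 0.25 with a total processed weight of 1000 gives index = 250 |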
382 | | |
383 | | // at the boundaries, we return _min or _max |
384 | 263 | if (index <= weight(0) / 2.0) { |
385 | 9 | DCHECK_GT(weight(0), 0); |
386 | 9 | return static_cast<Value>(_min + 2.0 * index / weight(0) * (mean(0) - _min)); |
387 | 9 | } |
388 | | |
389 | 254 | auto iter = std::lower_bound(_cumulative.cbegin(), _cumulative.cend(), index); |
390 | | |
391 | 254 | if (iter != _cumulative.cend() && iter != _cumulative.cbegin() && |
392 | 254 | iter + 1 != _cumulative.cend()) { |
393 | 201 | auto i = std::distance(_cumulative.cbegin(), iter); |
394 | 201 | auto z1 = index - *(iter - 1); |
395 | 201 | auto z2 = *(iter)-index; |
396 | | // VLOG_CRITICAL << "z2 " << z2 << " index " << index << " z1 " << z1; |
397 | 201 | return weightedAverage(mean(i - 1), z2, mean(i), z1); |
398 | 201 | } |
399 | | |
400 | 254 | DCHECK_LE(index, _processed_weight); |
401 | 53 | DCHECK_GE(index, _processed_weight - weight(n - 1) / 2.0); |
402 | | |
403 | 53 | auto z1 = static_cast<Value>(index - _processed_weight - weight(n - 1) / 2.0); |
404 | 53 | auto z2 = static_cast<Value>(weight(n - 1) / 2 - z1); |
405 | 53 | return weightedAverage(mean(n - 1), z1, _max, z2); |
406 | 254 | } |
407 | | |
408 | 0 | Value compression() const { return _compression; } |
409 | | |
410 | 19.9k | void add(Value x) { add(x, 1); } |
411 | | |
412 | | void compress() { process(); } |
413 | | |
414 | | // add a single centroid to the unprocessed vector, processing previously unprocessed centroids if our limit has |
415 | | // been reached. |
416 | 30.0k | bool add(Value x, Weight w) { |
417 | 30.0k | if (std::isnan(x)) { |
418 | 0 | return false; |
419 | 0 | } |
420 | 30.0k | _unprocessed.emplace_back(x, w); |
421 | 30.0k | _unprocessed_weight += w; |
422 | 30.0k | processIfNecessary(); |
423 | 30.0k | return true; |
424 | 30.0k | } |
425 | | |
426 | | void add(std::vector<Centroid>::const_iterator iter, |
427 | 0 | std::vector<Centroid>::const_iterator end) { |
428 | 0 | while (iter != end) { |
429 | 0 | const size_t diff = std::distance(iter, end); |
430 | 0 | const size_t room = _max_unprocessed - _unprocessed.size(); |
431 | 0 | auto mid = iter + std::min(diff, room); |
432 | 0 | while (iter != mid) { |
433 | 0 | _unprocessed.push_back(*(iter++)); |
434 | 0 | } |
435 | 0 | if (_unprocessed.size() >= _max_unprocessed) { |
436 | 0 | process(); |
437 | 0 | } |
438 | 0 | } |
439 | 0 | } |
440 | | |
441 | 718 | uint32_t serialized_size() { |
442 | 718 | return static_cast<uint32_t>(sizeof(uint32_t) + sizeof(Value) * 5 + sizeof(Index) * 2 + |
443 | 718 | sizeof(uint32_t) * 3 + _processed.size() * sizeof(Centroid) + |
444 | 718 | _unprocessed.size() * sizeof(Centroid) + |
445 | 718 | _cumulative.size() * sizeof(Weight)); |
446 | 718 | } |
447 | | |
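| | // Serialized layout written by serialize() and read back by unserialize(): |
| | //   uint32 total_size, |
| | //   Value  compression, min, max, |
| | //   Index  max_processed, max_unprocessed, |
| | //   Value  processed_weight, unprocessed_weight, |
| | //   then three length-prefixed (uint32) arrays: processed Centroids, |
| | //   unprocessed Centroids, and cumulative Weights. |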
448 | 357 | size_t serialize(uint8_t* writer) { |
449 | 357 | uint8_t* dst = writer; |
450 | 357 | uint32_t total_size = serialized_size(); |
451 | 357 | memcpy(writer, &total_size, sizeof(uint32_t)); |
452 | 357 | writer += sizeof(uint32_t); |
453 | 357 | memcpy(writer, &_compression, sizeof(Value)); |
454 | 357 | writer += sizeof(Value); |
455 | 357 | memcpy(writer, &_min, sizeof(Value)); |
456 | 357 | writer += sizeof(Value); |
457 | 357 | memcpy(writer, &_max, sizeof(Value)); |
458 | 357 | writer += sizeof(Value); |
459 | 357 | memcpy(writer, &_max_processed, sizeof(Index)); |
460 | 357 | writer += sizeof(Index); |
461 | 357 | memcpy(writer, &_max_unprocessed, sizeof(Index)); |
462 | 357 | writer += sizeof(Index); |
463 | 357 | memcpy(writer, &_processed_weight, sizeof(Value)); |
464 | 357 | writer += sizeof(Value); |
465 | 357 | memcpy(writer, &_unprocessed_weight, sizeof(Value)); |
466 | 357 | writer += sizeof(Value); |
467 | | |
468 | 357 | auto size = static_cast<uint32_t>(_processed.size()); |
469 | 357 | memcpy(writer, &size, sizeof(uint32_t)); |
470 | 357 | writer += sizeof(uint32_t); |
471 | 357 | for (int i = 0; i < size; i++) { |
472 | 0 | memcpy(writer, &_processed[i], sizeof(Centroid)); |
473 | 0 | writer += sizeof(Centroid); |
474 | 0 | } |
475 | | |
476 | 357 | size = static_cast<uint32_t>(_unprocessed.size()); |
477 | 357 | memcpy(writer, &size, sizeof(uint32_t)); |
478 | 357 | writer += sizeof(uint32_t); |
479 | | // TODO(weixiang): maybe a single memcpy would be enough here. |
480 | 14.0k | for (int i = 0; i < size; i++) { |
481 | 13.6k | memcpy(writer, &_unprocessed[i], sizeof(Centroid)); |
482 | 13.6k | writer += sizeof(Centroid); |
483 | 13.6k | } |
484 | | |
485 | 357 | size = static_cast<uint32_t>(_cumulative.size()); |
486 | 357 | memcpy(writer, &size, sizeof(uint32_t)); |
487 | 357 | writer += sizeof(uint32_t); |
488 | 455 | for (int i = 0; i < size; i++) { |
489 | 98 | memcpy(writer, &_cumulative[i], sizeof(Weight)); |
490 | 98 | writer += sizeof(Weight); |
491 | 98 | } |
492 | 357 | return writer - dst; |
493 | 357 | } |
494 | | |
495 | 318 | void unserialize(const uint8_t* type_reader) { |
496 | 318 | uint32_t total_length = 0; |
497 | 318 | memcpy(&total_length, type_reader, sizeof(uint32_t)); |
498 | 318 | type_reader += sizeof(uint32_t); |
499 | 318 | memcpy(&_compression, type_reader, sizeof(Value)); |
500 | 318 | type_reader += sizeof(Value); |
501 | 318 | memcpy(&_min, type_reader, sizeof(Value)); |
502 | 318 | type_reader += sizeof(Value); |
503 | 318 | memcpy(&_max, type_reader, sizeof(Value)); |
504 | 318 | type_reader += sizeof(Value); |
505 | | |
506 | 318 | memcpy(&_max_processed, type_reader, sizeof(Index)); |
507 | 318 | type_reader += sizeof(Index); |
508 | 318 | memcpy(&_max_unprocessed, type_reader, sizeof(Index)); |
509 | 318 | type_reader += sizeof(Index); |
510 | 318 | memcpy(&_processed_weight, type_reader, sizeof(Value)); |
511 | 318 | type_reader += sizeof(Value); |
512 | 318 | memcpy(&_unprocessed_weight, type_reader, sizeof(Value)); |
513 | 318 | type_reader += sizeof(Value); |
514 | | |
515 | 318 | uint32_t size; |
516 | 318 | memcpy(&size, type_reader, sizeof(uint32_t)); |
517 | 318 | type_reader += sizeof(uint32_t); |
518 | 318 | _processed.resize(size); |
519 | 318 | for (int i = 0; i < size; i++) { |
520 | 0 | memcpy(&_processed[i], type_reader, sizeof(Centroid)); |
521 | 0 | type_reader += sizeof(Centroid); |
522 | 0 | } |
523 | 318 | memcpy(&size, type_reader, sizeof(uint32_t)); |
524 | 318 | type_reader += sizeof(uint32_t); |
525 | 318 | _unprocessed.resize(size); |
526 | 17.8k | for (int i = 0; i < size; i++) { |
527 | 17.5k | memcpy(&_unprocessed[i], type_reader, sizeof(Centroid)); |
528 | 17.5k | type_reader += sizeof(Centroid); |
529 | 17.5k | } |
530 | 318 | memcpy(&size, type_reader, sizeof(uint32_t)); |
531 | 318 | type_reader += sizeof(uint32_t); |
532 | 318 | _cumulative.resize(size); |
533 | 377 | for (int i = 0; i < size; i++) { |
534 | 59 | memcpy(&_cumulative[i], type_reader, sizeof(Weight)); |
535 | 59 | type_reader += sizeof(Weight); |
536 | 59 | } |
537 | 318 | } |
538 | | |
539 | | private: |
540 | | Value _compression; |
541 | | |
542 | | Value _min = std::numeric_limits<Value>::max(); |
543 | | |
544 | | Value _max = std::numeric_limits<Value>::lowest(); // lowest(), not min(): min() is the smallest positive float |
545 | | |
546 | | Index _max_processed; |
547 | | |
548 | | Index _max_unprocessed; |
549 | | |
550 | | Value _processed_weight = 0.0; |
551 | | |
552 | | Value _unprocessed_weight = 0.0; |
553 | | |
554 | | std::vector<Centroid> _processed; |
555 | | |
556 | | std::vector<Centroid> _unprocessed; |
557 | | |
558 | | std::vector<Weight> _cumulative; |
559 | | |
560 | | // return mean of i-th centroid |
561 | 613 | Value mean(int64_t i) const noexcept { return _processed[i].mean(); } |
562 | | |
563 | | // return weight of i-th centroid |
564 | 13.3k | Weight weight(int64_t i) const noexcept { return _processed[i].weight(); } |
565 | | |
566 | | // append all unprocessed centroids into current unprocessed vector |
567 | 316 | void mergeUnprocessed(const std::vector<const TDigest*>& tdigests) { |
568 | 316 | if (tdigests.size() == 0) { |
569 | 0 | return; |
570 | 0 | } |
571 | | |
572 | 316 | size_t total = _unprocessed.size(); |
573 | 316 | for (const auto& td : tdigests) { |
574 | 316 | total += td->_unprocessed.size(); |
575 | 316 | } |
576 | | |
577 | 316 | _unprocessed.reserve(total); |
578 | 316 | for (const auto& td : tdigests) { |
579 | 316 | _unprocessed.insert(_unprocessed.end(), td->_unprocessed.cbegin(), |
580 | 316 | td->_unprocessed.cend()); |
581 | 316 | _unprocessed_weight += td->_unprocessed_weight; |
582 | 316 | } |
583 | 316 | } |
584 | | |
585 | | // merge all processed centroids together into a single sorted vector |
586 | 316 | void mergeProcessed(const std::vector<const TDigest*>& tdigests) { |
587 | 316 | if (tdigests.size() == 0) { |
588 | 0 | return; |
589 | 0 | } |
590 | | |
591 | 316 | size_t total = 0; |
592 | 316 | CentroidListQueue pq(CentroidListComparator {}); |
593 | 316 | for (const auto& td : tdigests) { |
594 | 316 | const auto& sorted = td->_processed; |
595 | 316 | auto size = sorted.size(); |
596 | 316 | if (size > 0) { |
597 | 1 | pq.push(CentroidList(sorted)); |
598 | 1 | total += size; |
599 | 1 | _processed_weight += td->_processed_weight; |
600 | 1 | } |
601 | 316 | } |
602 | 316 | if (total == 0) { |
603 | 315 | return; |
604 | 315 | } |
605 | | |
606 | 1 | if (_processed.size() > 0) { |
607 | 0 | pq.push(CentroidList(_processed)); |
608 | 0 | total += _processed.size(); |
609 | 0 | } |
610 | | |
611 | 1 | std::vector<Centroid> sorted; |
612 | 1 | VLOG_CRITICAL << "total " << total; |
613 | 1 | sorted.reserve(total); |
614 | | |
615 | 101 | while (!pq.empty()) { |
616 | 100 | auto best = pq.top(); |
617 | 100 | pq.pop(); |
618 | 100 | sorted.push_back(*(best.iter)); |
619 | 100 | if (best.advance()) { |
620 | 99 | pq.push(best); |
621 | 99 | } |
622 | 100 | } |
623 | 1 | _processed = std::move(sorted); |
624 | 1 | if (_processed.size() > 0) { |
625 | 1 | _min = std::min(_min, _processed[0].mean()); |
626 | 1 | _max = std::max(_max, (_processed.cend() - 1)->mean()); |
627 | 1 | } |
628 | 1 | } |
629 | | |
630 | 30.3k | void processIfNecessary() { |
631 | 30.3k | if (isDirty()) { |
632 | 1 | process(); |
633 | 1 | } |
634 | 30.3k | } |
635 | | |
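| | // _cumulative[i] is the weight up to the midpoint of centroid i: the sum of all |
| | // earlier centroid weights plus half of weight(i). quantileProcessed() and |
| | // cdfProcessed() interpolate between these midpoints. |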
636 | 650 | void updateCumulative() { |
637 | 650 | const auto n = _processed.size(); |
638 | 650 | _cumulative.clear(); |
639 | 650 | _cumulative.reserve(n + 1); |
640 | 650 | Weight previous = 0.0; |
641 | 13.5k | for (Index i = 0; i < n; i++) { |
642 | 12.8k | Weight current = weight(i); |
643 | 12.8k | auto halfCurrent = static_cast<Weight>(current / 2.0); |
644 | 12.8k | _cumulative.push_back(previous + halfCurrent); |
645 | 12.8k | previous = previous + current; |
646 | 12.8k | } |
647 | 650 | _cumulative.push_back(previous); |
648 | 650 | } |
649 | | |
650 | | // merges _unprocessed centroids and _processed centroids together and processes them |
651 | | // when complete, _unprocessed will be empty and _processed will have at most _max_processed centroids |
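| | // one pass: sort the buffer, merge it with the already-sorted _processed run, then |
| | // greedily fold neighbours into the current centroid while the running weight stays |
| | // within the k-scale limit wLimit derived from integratedQ()/integratedLocation() |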
652 | 334 | void process() { |
653 | 334 | CentroidComparator cc; |
654 | | // Benchmarked with: select percentile_approx(lo_orderkey, 0.5) from lineorder; |
655 | | // pdqsort outperformed RadixSort here when the data is struct Centroid, |
656 | | // but RadixSort was faster for plain types like int/float in std::vector<T>. |
657 | 334 | pdqsort(_unprocessed.begin(), _unprocessed.end(), cc); |
658 | 334 | auto count = _unprocessed.size(); |
659 | 334 | _unprocessed.insert(_unprocessed.end(), _processed.cbegin(), _processed.cend()); |
660 | 334 | std::inplace_merge(_unprocessed.begin(), _unprocessed.begin() + count, _unprocessed.end(), |
661 | 334 | cc); |
662 | | |
663 | 334 | _processed_weight += _unprocessed_weight; |
664 | 334 | _unprocessed_weight = 0; |
665 | 334 | _processed.clear(); |
666 | | |
667 | 334 | _processed.push_back(_unprocessed[0]); |
668 | 334 | Weight wSoFar = _unprocessed[0].weight(); |
669 | 334 | Weight wLimit = _processed_weight * integratedQ(1.0); |
670 | | |
671 | 334 | auto end = _unprocessed.end(); |
672 | 36.2k | for (auto iter = _unprocessed.cbegin() + 1; iter < end; iter++) { |
673 | 35.9k | const auto& centroid = *iter; |
674 | 35.9k | Weight projectedW = wSoFar + centroid.weight(); |
675 | 35.9k | if (projectedW <= wLimit) { |
676 | 23.4k | wSoFar = projectedW; |
677 | 23.4k | (_processed.end() - 1)->add(centroid); |
678 | 23.4k | } else { |
679 | 12.4k | auto k1 = integratedLocation(wSoFar / _processed_weight); |
680 | 12.4k | wLimit = _processed_weight * integratedQ(static_cast<Value>(k1 + 1.0)); |
681 | 12.4k | wSoFar += centroid.weight(); |
682 | 12.4k | _processed.emplace_back(centroid); |
683 | 12.4k | } |
684 | 35.9k | } |
685 | 334 | _unprocessed.clear(); |
686 | 334 | _min = std::min(_min, _processed[0].mean()); |
687 | 334 | VLOG_CRITICAL << "new _min " << _min; |
688 | 334 | _max = std::max(_max, (_processed.cend() - 1)->mean()); |
689 | 334 | VLOG_CRITICAL << "new _max " << _max; |
690 | 334 | updateCumulative(); |
691 | 334 | } |
692 | | |
693 | 0 | size_t checkWeights(const std::vector<Centroid>& sorted, Value total) { |
694 | 0 | size_t badWeight = 0; |
695 | 0 | auto k1 = 0.0; |
696 | 0 | auto q = 0.0; |
697 | 0 | for (auto iter = sorted.cbegin(); iter != sorted.cend(); iter++) { |
698 | 0 | auto w = iter->weight(); |
699 | 0 | auto dq = w / total; |
700 | 0 | auto k2 = integratedLocation(static_cast<Value>(q + dq)); |
701 | 0 | if (k2 - k1 > 1 && w != 1) { |
702 | 0 | VLOG_CRITICAL << "Oversize centroid at " << std::distance(sorted.cbegin(), iter) |
703 | 0 | << " k1 " << k1 << " k2 " << k2 << " dk " << (k2 - k1) << " w " << w |
704 | 0 | << " q " << q; |
705 | 0 | badWeight++; |
706 | 0 | } |
707 | 0 | if (k2 - k1 > 1.5 && w != 1) { |
708 | 0 | VLOG_CRITICAL << "Egregiously Oversize centroid at " |
709 | 0 | << std::distance(sorted.cbegin(), iter) << " k1 " << k1 << " k2 " |
710 | 0 | << k2 << " dk " << (k2 - k1) << " w " << w << " q " << q; |
711 | 0 | badWeight++; |
712 | 0 | } |
713 | 0 | q += dq; |
714 | 0 | k1 = k2; |
715 | 0 | } |
716 | 0 | |
717 | 0 | return badWeight; |
718 | 0 | } |
719 | | |
720 | | /** |
721 | | * Converts a quantile into a centroid scale value. The centroid scale is nominally |
722 | | * the number k of the centroid that a quantile point q should belong to. Due to |
723 | | * round-offs, however, we can't align things perfectly without splitting points |
724 | | * and centroids. We don't want to do that, so we have to allow for offsets. |
725 | | * In the end, the criterion is that any quantile range that spans a centroid |
726 | | * scale range more than one should be split across more than one centroid if |
727 | | * possible. This won't be possible if the quantile range refers to a single point |
728 | | * or an already existing centroid. |
729 | | * <p/> |
730 | | * This mapping is steep near q=0 or q=1 so each centroid there will correspond to |
731 | | * less q range. Near q=0.5, the mapping is flatter so that centroids there will |
732 | | * represent a larger chunk of quantiles. |
733 | | * |
734 | | * @param q The quantile scale value to be mapped. |
735 | | * @return The centroid scale value corresponding to q. |
736 | | */ |
737 | 12.4k | Value integratedLocation(Value q) const { |
738 | 12.4k | return static_cast<Value>(_compression * (std::asin(2.0 * q - 1.0) + M_PI / 2) / M_PI); |
739 | 12.4k | } |
740 | | |
741 | 12.7k | Value integratedQ(Value k) const { |
742 | 12.7k | return static_cast<Value>( |
743 | 12.7k | (std::sin(std::min(k, _compression) * M_PI / _compression - M_PI / 2) + 1) / 2); |
744 | 12.7k | } |
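| | // The two maps above are inverses of each other (with delta = _compression): |
| | //   k(q) = delta * (asin(2q - 1) + pi/2) / pi      so k(0) = 0 and k(1) = delta |
| | //   q(k) = (sin(min(k, delta) * pi / delta - pi/2) + 1) / 2 |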
745 | | |
746 | | /** |
747 | | * Same as {@link #weightedAverageSorted(Value, Value, Value, Value)} but flips |
749 | | * the order of the variables if <code>x1</code> is greater than |
750 | | * <code>x2</code>. |
750 | | */ |
751 | 262 | static Value weightedAverage(Value x1, Value w1, Value x2, Value w2) { |
752 | 262 | return (x1 <= x2) ? weightedAverageSorted(x1, w1, x2, w2) |
753 | 262 | : weightedAverageSorted(x2, w2, x1, w1); |
754 | 262 | } |
755 | | |
756 | | /** |
757 | | * Compute the weighted average between <code>x1</code> with a weight of |
758 | | * <code>w1</code> and <code>x2</code> with a weight of <code>w2</code>. |
759 | | * This expects <code>x1</code> to be less than or equal to <code>x2</code> |
760 | | * and is guaranteed to return a number between <code>x1</code> and |
761 | | * <code>x2</code>. |
762 | | */ |
763 | 262 | static Value weightedAverageSorted(Value x1, Value w1, Value x2, Value w2) { |
764 | 262 | DCHECK_LE(x1, x2); |
765 | 262 | const Value x = (x1 * w1 + x2 * w2) / (w1 + w2); |
766 | 262 | return std::max(x1, std::min(x, x2)); |
767 | 262 | } |
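| | // e.g. weightedAverageSorted(1, 3, 5, 1) = (1*3 + 5*1) / (3 + 1) = 2, |
| | // and the result is clamped to stay within [x1, x2] |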
768 | | |
769 | 0 | static Value interpolate(Value x, Value x0, Value x1) { return (x - x0) / (x1 - x0); } |
770 | | |
771 | | /** |
772 | | * Computes an interpolated value of a quantile that is between two centroids. |
773 | | * |
774 | | * Index is the quantile desired multiplied by the total number of samples - 1. |
775 | | * |
776 | | * @param index Denormalized quantile desired |
777 | | * @param previousIndex The denormalized quantile corresponding to the center of the previous centroid. |
778 | | * @param nextIndex The denormalized quantile corresponding to the center of the following centroid. |
779 | | * @param previousMean The mean of the previous centroid. |
780 | | * @param nextMean The mean of the following centroid. |
781 | | * @return The interpolated mean. |
782 | | */ |
783 | | static Value quantile(Value index, Value previousIndex, Value nextIndex, Value previousMean, |
784 | 0 | Value nextMean) { |
785 | 0 | const auto delta = nextIndex - previousIndex; |
786 | 0 | const auto previousWeight = (nextIndex - index) / delta; |
787 | 0 | const auto nextWeight = (index - previousIndex) / delta; |
788 | 0 | return previousMean * previousWeight + nextMean * nextWeight; |
789 | 0 | } |
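| | // e.g. quantile(2.5, 2, 3, 10.0, 20.0) interpolates halfway: 0.5*10 + 0.5*20 = 15 |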
790 | | }; |
791 | | } // namespace doris |