Coverage Report

Created: 2024-11-20 12:56

/root/doris/be/src/util/threadpool.cpp
Line | Count | Source
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
// This file is copied from
18
// https://github.com/apache/impala/blob/branch-2.9.0/be/src/util/threadpool.cc
19
// and modified by Doris
20
21
#include "util/threadpool.h"
22
23
#include <algorithm>
24
#include <cstdint>
25
#include <limits>
26
#include <ostream>
27
#include <thread>
28
#include <utility>
29
30
#include "common/logging.h"
31
#include "gutil/map-util.h"
32
#include "gutil/port.h"
33
#include "gutil/strings/substitute.h"
34
#include "util/debug/sanitizer_scopes.h"
35
#include "util/scoped_cleanup.h"
36
#include "util/thread.h"
37
38
namespace doris {
39
using namespace ErrorCode;
40
41
using std::string;
42
using strings::Substitute;
43
44
class FunctionRunnable : public Runnable {
45
public:
46
8.20k
    explicit FunctionRunnable(std::function<void()> func) : _func(std::move(func)) {}
47
48
4.17k
    void run() override { _func(); }
49
50
private:
51
    std::function<void()> _func;
52
};
53
54
ThreadPoolBuilder::ThreadPoolBuilder(string name)
55
        : _name(std::move(name)),
56
          _min_threads(0),
57
          _max_threads(std::thread::hardware_concurrency()),
58
          _max_queue_size(std::numeric_limits<int>::max()),
59
241
          _idle_timeout(std::chrono::milliseconds(500)) {}
60
61
193
ThreadPoolBuilder& ThreadPoolBuilder::set_min_threads(int min_threads) {
62
193
    CHECK_GE(min_threads, 0);
63
193
    _min_threads = min_threads;
64
193
    return *this;
65
193
}
66
67
213
ThreadPoolBuilder& ThreadPoolBuilder::set_max_threads(int max_threads) {
68
213
    CHECK_GT(max_threads, 0);
69
213
    _max_threads = max_threads;
70
213
    return *this;
71
213
}
72
73
29
ThreadPoolBuilder& ThreadPoolBuilder::set_max_queue_size(int max_queue_size) {
74
29
    _max_queue_size = max_queue_size;
75
29
    return *this;
76
29
}
77
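
Aside (not part of the coverage listing): a pool is normally configured through this builder and then constructed via ThreadPoolBuilder::build(), which is declared in util/threadpool.h rather than defined in this file. A minimal sketch, assuming build() allocates the pool and runs init():

#include <memory>

#include "util/threadpool.h"

doris::Status make_example_pool(std::unique_ptr<doris::ThreadPool>* pool) {
    return doris::ThreadPoolBuilder("example_pool")
            .set_min_threads(2)        // workers kept alive even when idle
            .set_max_threads(8)        // upper bound on concurrent workers
            .set_max_queue_size(1024)  // submissions beyond this backlog are rejected
            .build(pool);              // assumed to allocate the pool and call init()
}
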
78
ThreadPoolToken::ThreadPoolToken(ThreadPool* pool, ThreadPool::ExecutionMode mode,
79
                                 int max_concurrency)
80
        : _mode(mode),
81
          _pool(pool),
82
          _state(State::IDLE),
83
          _active_threads(0),
84
          _max_concurrency(max_concurrency),
85
          _num_submitted_tasks(0),
86
2.00k
          _num_unsubmitted_tasks(0) {
87
2.00k
    if (max_concurrency == 1 && mode != ThreadPool::ExecutionMode::SERIAL) {
88
6
        _mode = ThreadPool::ExecutionMode::SERIAL;
89
6
    }
90
2.00k
}
91
92
2.00k
ThreadPoolToken::~ThreadPoolToken() {
93
2.00k
    shutdown();
94
2.00k
    _pool->release_token(this);
95
2.00k
}
96
97
6.94k
Status ThreadPoolToken::submit(std::shared_ptr<Runnable> r) {
98
6.94k
    return _pool->do_submit(std::move(r), this);
99
6.94k
}
100
101
7.32k
Status ThreadPoolToken::submit_func(std::function<void()> f) {
102
7.32k
    return submit(std::make_shared<FunctionRunnable>(std::move(f)));
103
7.32k
}
104
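
Aside: submit() and submit_func() above are how callers attach work to a token. A minimal sketch of serial execution on a shared pool, assuming `pool` is an already-initialized ThreadPool:

#include <memory>

#include "util/threadpool.h"

void run_two_steps_in_order(doris::ThreadPool* pool) {
    // A SERIAL token runs its tasks one at a time, in submission order,
    // even though the underlying pool is shared with other callers.
    std::unique_ptr<doris::ThreadPoolToken> token =
            pool->new_token(doris::ThreadPool::ExecutionMode::SERIAL);
    static_cast<void>(token->submit_func([] { /* step 1 */ }));
    static_cast<void>(token->submit_func([] { /* step 2 */ }));
    token->wait();  // block until both steps finish; the destructor also calls shutdown()
}
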
105
3.11k
void ThreadPoolToken::shutdown() {
106
3.11k
    std::unique_lock<std::mutex> l(_pool->_lock);
107
3.11k
    _pool->check_not_pool_thread_unlocked();
108
109
    // Clear the queue under the lock, but defer releasing the tasks until
110
    // outside the lock, in case there are concurrent threads wanting to access
111
    // the ThreadPool. The tasks' destructors may acquire locks, etc., so this
112
    // also prevents lock inversions.
113
3.11k
    std::deque<ThreadPool::Task> to_release = std::move(_entries);
114
3.11k
    _pool->_total_queued_tasks -= to_release.size();
115
116
3.11k
    switch (state()) {
117
777
    case State::IDLE:
118
        // There were no tasks outstanding; we can quiesce the token immediately.
119
777
        transition(State::QUIESCED);
120
777
        break;
121
933
    case State::RUNNING:
122
        // There were outstanding tasks. If any are still running, switch to
123
        // QUIESCING and wait for them to finish (the worker thread executing
124
        // the token's last task will switch the token to QUIESCED). Otherwise,
125
        // we can quiesce the token immediately.
126
127
        // Note: this is an O(n) operation, but it's expected to be infrequent.
128
        // Plus doing it this way (rather than switching to QUIESCING and waiting
129
        // for a worker thread to process the queue entry) helps retain state
130
        // transition symmetry with ThreadPool::shutdown.
131
9.19k
        for (auto it = _pool->_queue.begin(); it != _pool->_queue.end();) {
132
8.26k
            if (*it == this) {
133
769
                it = _pool->_queue.erase(it);
134
7.49k
            } else {
135
7.49k
                it++;
136
7.49k
            }
137
8.26k
        }
138
139
933
        if (_active_threads == 0) {
140
380
            transition(State::QUIESCED);
141
380
            break;
142
380
        }
143
553
        transition(State::QUIESCING);
144
553
        [[fallthrough]];
145
564
    case State::QUIESCING:
146
        // The token is already quiescing. Just wait for a worker thread to
147
        // switch it to QUIESCED.
148
1.12k
        _not_running_cond.wait(l, [this]() { return state() == State::QUIESCED; });
149
564
        break;
150
1.38k
    default:
151
1.38k
        break;
152
3.11k
    }
153
3.11k
}
154
155
633
void ThreadPoolToken::wait() {
156
633
    std::unique_lock<std::mutex> l(_pool->_lock);
157
633
    _pool->check_not_pool_thread_unlocked();
158
824
    _not_running_cond.wait(l, [this]() { return !is_active(); });
159
633
}
160
161
6.65k
void ThreadPoolToken::transition(State new_state) {
162
6.65k
#ifndef NDEBUG
163
6.65k
    CHECK_NE(_state, new_state);
164
165
6.65k
    switch (_state) {
166
3.57k
    case State::IDLE:
167
3.57k
        CHECK(new_state == State::RUNNING || new_state == State::QUIESCED);
168
3.57k
        if (new_state == State::RUNNING) {
169
2.51k
            CHECK(!_entries.empty());
170
2.51k
        } else {
171
1.06k
            CHECK(_entries.empty());
172
1.06k
            CHECK_EQ(_active_threads, 0);
173
1.06k
        }
174
3.57k
        break;
175
2.51k
    case State::RUNNING:
176
2.51k
        CHECK(new_state == State::IDLE || new_state == State::QUIESCING ||
177
2.51k
              new_state == State::QUIESCED);
178
2.51k
        CHECK(_entries.empty());
179
2.51k
        if (new_state == State::QUIESCING) {
180
563
            CHECK_GT(_active_threads, 0);
181
563
        }
182
2.51k
        break;
183
563
    case State::QUIESCING:
184
563
        CHECK(new_state == State::QUIESCED);
185
563
        CHECK_EQ(_active_threads, 0);
186
563
        break;
187
0
    case State::QUIESCED:
188
0
        CHECK(false); // QUIESCED is a terminal state
189
0
        break;
190
0
    default:
191
0
        LOG(FATAL) << "Unknown token state: " << _state;
192
6.65k
    }
193
6.65k
#endif
194
195
    // Take actions based on the state we're entering.
196
6.65k
    switch (new_state) {
197
1.57k
    case State::IDLE:
198
3.57k
    case State::QUIESCED:
199
3.57k
        _not_running_cond.notify_all();
200
3.57k
        break;
201
3.07k
    default:
202
3.07k
        break;
203
6.65k
    }
204
205
6.65k
    _state = new_state;
206
6.65k
}
207
208
0
const char* ThreadPoolToken::state_to_string(State s) {
209
0
    switch (s) {
210
0
    case State::IDLE:
211
0
        return "IDLE";
212
0
        break;
213
0
    case State::RUNNING:
214
0
        return "RUNNING";
215
0
        break;
216
0
    case State::QUIESCING:
217
0
        return "QUIESCING";
218
0
        break;
219
0
    case State::QUIESCED:
220
0
        return "QUIESCED";
221
0
        break;
222
0
    }
223
0
    return "<cannot reach here>";
224
0
}
225
226
5.87k
bool ThreadPoolToken::need_dispatch() {
227
5.87k
    return _state == ThreadPoolToken::State::IDLE ||
228
5.87k
           (_mode == ThreadPool::ExecutionMode::CONCURRENT &&
229
3.36k
            _num_submitted_tasks < _max_concurrency);
230
5.87k
}
231
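
Aside: need_dispatch() above is what caps a CONCURRENT token at _max_concurrency simultaneously scheduled tasks; anything beyond that stays in the token's _entries as unsubmitted work until a running task completes and re-queues the token. A hypothetical sketch of the caller-visible effect:

#include <memory>

#include "util/threadpool.h"

void bounded_fanout(doris::ThreadPool* pool) {
    // At most two of the eight tasks below run at the same time.
    std::unique_ptr<doris::ThreadPoolToken> token =
            pool->new_token(doris::ThreadPool::ExecutionMode::CONCURRENT,
                            /*max_concurrency=*/2);
    for (int i = 0; i < 8; ++i) {
        static_cast<void>(token->submit_func([] { /* one work item */ }));
    }
    token->wait();
}
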
232
ThreadPool::ThreadPool(const ThreadPoolBuilder& builder)
233
        : _name(builder._name),
234
          _min_threads(builder._min_threads),
235
          _max_threads(builder._max_threads),
236
          _max_queue_size(builder._max_queue_size),
237
          _idle_timeout(builder._idle_timeout),
238
          _pool_status(Status::Uninitialized("The pool was not initialized.")),
239
          _num_threads(0),
240
          _num_threads_pending_start(0),
241
          _active_threads(0),
242
          _total_queued_tasks(0),
243
224
          _tokenless(new_token(ExecutionMode::CONCURRENT)) {}
244
245
224
ThreadPool::~ThreadPool() {
246
    // There should only be one live token: the one used in tokenless submission.
247
224
    CHECK_EQ(1, _tokens.size()) << strings::Substitute(
248
0
            "Threadpool $0 destroyed with $1 allocated tokens", _name, _tokens.size());
249
224
    shutdown();
250
224
}
251
252
224
Status ThreadPool::init() {
253
224
    if (!_pool_status.is<UNINITIALIZED>()) {
254
0
        return Status::NotSupported("The thread pool {} is already initialized", _name);
255
0
    }
256
224
    _pool_status = Status::OK();
257
224
    _num_threads_pending_start = _min_threads;
258
1.83k
    for (int i = 0; i < _min_threads; i++) {
259
1.60k
        Status status = create_thread();
260
1.60k
        if (!status.ok()) {
261
0
            shutdown();
262
0
            return status;
263
0
        }
264
1.60k
    }
265
224
    return Status::OK();
266
224
}
267
268
369
void ThreadPool::shutdown() {
269
369
    debug::ScopedTSANIgnoreReadsAndWrites ignore_tsan;
270
369
    std::unique_lock<std::mutex> l(_lock);
271
369
    check_not_pool_thread_unlocked();
272
273
    // Note: this is the same error seen at submission if the pool is at
274
    // capacity, so clients can't tell them apart. This isn't really a practical
275
    // concern though because shutting down a pool typically requires clients to
276
    // be quiesced first, so there's no danger of a client getting confused.
277
    // Do not print a stack trace here.
278
369
    _pool_status = Status::Error<SERVICE_UNAVAILABLE, false>(
279
369
            "The thread pool {} has been shut down.", _name);
280
281
    // Clear the various queues under the lock, but defer releasing
282
    // the tasks until outside the lock, in case there are concurrent threads
283
    // wanting to access the ThreadPool. The tasks' destructors may acquire
284
    // locks, etc., so this also prevents lock inversions.
285
369
    _queue.clear();
286
287
369
    std::deque<std::deque<Task>> to_release;
288
470
    for (auto* t : _tokens) {
289
470
        if (!t->_entries.empty()) {
290
1
            to_release.emplace_back(std::move(t->_entries));
291
1
        }
292
470
        switch (t->state()) {
293
286
        case ThreadPoolToken::State::IDLE:
294
            // The token is idle; we can quiesce it immediately.
295
286
            t->transition(ThreadPoolToken::State::QUIESCED);
296
286
            break;
297
11
        case ThreadPoolToken::State::RUNNING:
298
            // The token has tasks associated with it. If they're merely queued
299
            // (i.e. there are no active threads), the tasks will have been removed
300
            // above and we can quiesce immediately. Otherwise, we need to wait for
301
            // the threads to finish.
302
11
            t->transition(t->_active_threads > 0 ? ThreadPoolToken::State::QUIESCING
303
11
                                                 : ThreadPoolToken::State::QUIESCED);
304
11
            break;
305
173
        default:
306
173
            break;
307
470
        }
308
470
    }
309
310
    // The queues are empty. Wake any sleeping worker threads and wait for all
311
    // of them to exit. Some worker threads will exit immediately upon waking,
312
    // while others will exit after they finish executing an outstanding task.
313
369
    _total_queued_tasks = 0;
314
2.05k
    while (!_idle_threads.empty()) {
315
1.68k
        _idle_threads.front().not_empty.notify_one();
316
1.68k
        _idle_threads.pop_front();
317
1.68k
    }
318
319
559
    _no_threads_cond.wait(l, [this]() { return _num_threads + _num_threads_pending_start == 0; });
320
321
    // All the threads have exited. Check the state of each token.
322
470
    for (auto* t : _tokens) {
323
470
        DCHECK(t->state() == ThreadPoolToken::State::IDLE ||
324
470
               t->state() == ThreadPoolToken::State::QUIESCED);
325
470
    }
326
369
}
327
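
Aside: shutdown() must be called from a thread that is not a pool worker (see check_not_pool_thread_unlocked() below), and it drops tasks that are still queued. A minimal teardown sketch:

#include "util/threadpool.h"

void stop_pool(doris::ThreadPool* pool) {
    pool->wait();      // optional: let queued and running work drain first
    pool->shutdown();  // reject new submissions, drop anything still queued,
                       // and wait for all worker threads to exit
}

The ThreadPool destructor calls shutdown() itself, so an explicit call is only needed when the pool has to stop before it is destroyed.
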
328
2.00k
std::unique_ptr<ThreadPoolToken> ThreadPool::new_token(ExecutionMode mode, int max_concurrency) {
329
2.00k
    std::lock_guard<std::mutex> l(_lock);
330
2.00k
    std::unique_ptr<ThreadPoolToken> t(new ThreadPoolToken(this, mode, max_concurrency));
331
2.00k
    InsertOrDie(&_tokens, t.get());
332
2.00k
    return t;
333
2.00k
}
334
335
2.00k
void ThreadPool::release_token(ThreadPoolToken* t) {
336
2.00k
    std::lock_guard<std::mutex> l(_lock);
337
2.00k
    CHECK(!t->is_active()) << strings::Substitute("Token with state $0 may not be released",
338
0
                                                  ThreadPoolToken::state_to_string(t->state()));
339
2.00k
    CHECK_EQ(1, _tokens.erase(t));
340
2.00k
}
341
342
1.14k
Status ThreadPool::submit(std::shared_ptr<Runnable> r) {
343
1.14k
    return do_submit(std::move(r), _tokenless.get());
344
1.14k
}
345
346
989
Status ThreadPool::submit_func(std::function<void()> f) {
347
989
    return submit(std::make_shared<FunctionRunnable>(std::move(f)));
348
989
}
349
350
8.44k
Status ThreadPool::do_submit(std::shared_ptr<Runnable> r, ThreadPoolToken* token) {
351
8.44k
    DCHECK(token);
352
8.44k
    std::chrono::time_point<std::chrono::system_clock> submit_time =
353
8.44k
            std::chrono::system_clock::now();
354
355
8.44k
    std::unique_lock<std::mutex> l(_lock);
356
8.44k
    if (PREDICT_FALSE(!_pool_status.ok())) {
357
0
        return _pool_status;
358
0
    }
359
360
8.44k
    if (PREDICT_FALSE(!token->may_submit_new_tasks())) {
361
2.67k
        return Status::Error<SERVICE_UNAVAILABLE>("Thread pool({}) token was shut down", _name);
362
2.67k
    }
363
364
    // Size limit check.
365
5.76k
    int64_t capacity_remaining = static_cast<int64_t>(_max_threads) - _active_threads +
366
5.76k
                                 static_cast<int64_t>(_max_queue_size) - _total_queued_tasks;
367
5.76k
    if (capacity_remaining < 1) {
368
7
        return Status::Error<SERVICE_UNAVAILABLE>(
369
7
                "Thread pool {} is at capacity ({}/{} tasks running, {}/{} tasks queued)", _name,
370
7
                _num_threads + _num_threads_pending_start, _max_threads, _total_queued_tasks,
371
7
                _max_queue_size);
372
7
    }
373
374
    // Should we create another thread?
375
376
    // We assume that each current inactive thread will grab one item from the
377
    // queue.  If it seems like we'll need another thread, we create one.
378
    //
379
    // Rather than creating the thread here, while holding the lock, we defer
380
    // it to down below. This is because thread creation can be rather slow
381
    // (hundreds of milliseconds in some cases) and we'd like to allow the
382
    // existing threads to continue to process tasks while we do so.
383
    //
384
    // In theory, a currently active thread could finish immediately after this
385
    // calculation but before our new worker starts running. This would mean we
386
    // created a thread we didn't really need. However, this race is unavoidable
387
    // and harmless.
388
    //
389
    // Of course, we never create more than _max_threads threads no matter what.
390
5.76k
    int threads_from_this_submit =
391
5.76k
            token->is_active() && token->mode() == ExecutionMode::SERIAL ? 0 : 1;
392
5.76k
    int inactive_threads = _num_threads + _num_threads_pending_start - _active_threads;
393
5.76k
    int additional_threads =
394
5.76k
            static_cast<int>(_queue.size()) + threads_from_this_submit - inactive_threads;
395
5.76k
    bool need_a_thread = false;
396
5.76k
    if (additional_threads > 0 && _num_threads + _num_threads_pending_start < _max_threads) {
397
636
        need_a_thread = true;
398
636
        _num_threads_pending_start++;
399
636
    }
400
401
5.76k
    Task task;
402
5.76k
    task.runnable = std::move(r);
403
5.76k
    task.submit_time = submit_time;
404
405
    // Add the task to the token's queue.
406
5.76k
    ThreadPoolToken::State state = token->state();
407
5.76k
    DCHECK(state == ThreadPoolToken::State::IDLE || state == ThreadPoolToken::State::RUNNING);
408
5.76k
    token->_entries.emplace_back(std::move(task));
409
    // When a task needs to be executed for a token, the token object is submitted to the queue.
410
    // There are currently two places where a token is submitted to the queue:
411
    // 1. When a new task is submitted, the token is added to the queue if it is still in the
412
    //    IDLE state or its concurrency has not yet reached _max_concurrency.
413
    // 2. When a dispatch thread finishes executing one of the token's tasks:
414
    //    1. If it is a SERIAL token and there are unsubmitted tasks, the token is re-queued.
415
    //    2. If it is a CONCURRENT token, there are still unsubmitted tasks, and the concurrency
416
    //       limit has not been reached, the token is re-queued.
417
5.76k
    if (token->need_dispatch()) {
418
4.32k
        _queue.emplace_back(token);
419
4.32k
        ++token->_num_submitted_tasks;
420
4.32k
        if (state == ThreadPoolToken::State::IDLE) {
421
2.51k
            token->transition(ThreadPoolToken::State::RUNNING);
422
2.51k
        }
423
4.32k
    } else {
424
1.43k
        ++token->_num_unsubmitted_tasks;
425
1.43k
    }
426
5.76k
    _total_queued_tasks++;
427
428
    // Wake up an idle thread for this task. Choosing the thread at the front of
429
    // the list ensures LIFO semantics as idling threads are also added to the front.
430
    //
431
    // If there are no idle threads, the new task remains on the queue and is
432
    // processed by an active thread (or a thread we're about to create) at some
433
    // point in the future.
434
5.76k
    if (!_idle_threads.empty()) {
435
1.18k
        _idle_threads.front().not_empty.notify_one();
436
1.18k
        _idle_threads.pop_front();
437
1.18k
    }
438
5.76k
    l.unlock();
439
440
5.76k
    if (need_a_thread) {
441
636
        Status status = create_thread();
442
636
        if (!status.ok()) {
443
0
            l.lock();
444
0
            _num_threads_pending_start--;
445
0
            if (_num_threads + _num_threads_pending_start == 0) {
446
                // If we have no threads, we can't do any work.
447
0
                return status;
448
0
            }
449
            // If we failed to create a thread, but there are still some other
450
            // worker threads, log a warning message and continue.
451
0
            LOG(WARNING) << "Thread pool " << _name
452
0
                         << " failed to create thread: " << status.to_string();
453
0
        }
454
636
    }
455
456
5.76k
    return Status::OK();
457
5.76k
}
458
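
Aside: a worked instance of the capacity check in do_submit() above: with _max_threads = 4, _active_threads = 4, _max_queue_size = 2, and _total_queued_tasks = 2, capacity_remaining = 4 - 4 + 2 - 2 = 0, so the submission is rejected with SERVICE_UNAVAILABLE. A hypothetical caller-side sketch:

#include <functional>
#include <utility>

#include "common/logging.h"
#include "util/threadpool.h"

doris::Status try_submit(doris::ThreadPool* pool, std::function<void()> work) {
    doris::Status st = pool->submit_func(std::move(work));
    if (!st.ok()) {
        // A shut-down pool and a full pool return the same error code, so
        // callers generally just log and then retry or drop the task.
        LOG(WARNING) << "task rejected by pool: " << st.to_string();
    }
    return st;
}
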
459
30
void ThreadPool::wait() {
460
30
    std::unique_lock<std::mutex> l(_lock);
461
30
    check_not_pool_thread_unlocked();
462
73
    _idle_cond.wait(l, [this]() { return _total_queued_tasks == 0 && _active_threads == 0; });
463
30
}
464
465
2.24k
void ThreadPool::dispatch_thread() {
466
2.24k
    std::unique_lock<std::mutex> l(_lock);
467
2.24k
    debug::ScopedTSANIgnoreReadsAndWrites ignore_tsan;
468
2.24k
    InsertOrDie(&_threads, Thread::current_thread());
469
2.24k
    DCHECK_GT(_num_threads_pending_start, 0);
470
2.24k
    _num_threads++;
471
2.24k
    _num_threads_pending_start--;
472
473
    // Owned by this worker thread and added/removed from _idle_threads as needed.
474
2.24k
    IdleThread me;
475
476
45.7k
    while (true) {
477
        // Note: a non-OK _pool_status (set by shutdown()) indicates normal shutdown.
478
45.7k
        if (!_pool_status.ok()) {
479
1.72k
            VLOG_CRITICAL << "DispatchThread exiting: " << _pool_status.to_string();
480
1.72k
            break;
481
1.72k
        }
482
483
44.0k
        if (_num_threads + _num_threads_pending_start > _max_threads) {
484
2
            break;
485
2
        }
486
487
44.0k
        if (_queue.empty()) {
488
            // There's no work to do, let's go idle.
489
            //
490
            // Note: if FIFO behavior is desired, it's as simple as changing this to push_back().
491
39.6k
            _idle_threads.push_front(me);
492
39.6k
            SCOPED_CLEANUP({
493
                // For some wake ups (i.e. shutdown or do_submit) this thread is
494
                // guaranteed to be unlinked after being awakened. In others (i.e.
495
                // spurious wake-up or Wait timeout), it'll still be linked.
496
39.6k
                if (me.is_linked()) {
497
39.6k
                    _idle_threads.erase(_idle_threads.iterator_to(me));
498
39.6k
                }
499
39.6k
            });
500
39.6k
            if (me.not_empty.wait_for(l, _idle_timeout) == std::cv_status::timeout) {
501
                // After much investigation, it appears that pthread condition variables have
502
                // a weird behavior in which they can return ETIMEDOUT from timed_wait even if
503
                // another thread did in fact signal. Apparently after a timeout there is some
504
                // brief period during which another thread may actually grab the internal mutex
505
                // protecting the state, signal, and release again before we get the mutex. So,
506
                // we'll recheck the empty queue case regardless.
507
36.9k
                if (_queue.empty() && _num_threads + _num_threads_pending_start > _min_threads) {
508
518
                    VLOG_NOTICE << "Releasing worker thread from pool " << _name << " after "
509
0
                                << std::chrono::duration_cast<std::chrono::milliseconds>(
510
0
                                           _idle_timeout)
511
0
                                           .count()
512
0
                                << "ms of idle time.";
513
518
                    break;
514
518
                }
515
36.9k
            }
516
39.1k
            continue;
517
39.6k
        }
518
519
        // Get the next token and task to execute.
520
4.35k
        ThreadPoolToken* token = _queue.front();
521
4.35k
        _queue.pop_front();
522
4.35k
        DCHECK_EQ(ThreadPoolToken::State::RUNNING, token->state());
523
4.35k
        DCHECK(!token->_entries.empty());
524
4.35k
        Task task = std::move(token->_entries.front());
525
4.35k
        token->_entries.pop_front();
526
4.35k
        token->_active_threads++;
527
4.35k
        --_total_queued_tasks;
528
4.35k
        ++_active_threads;
529
530
4.35k
        l.unlock();
531
532
        // Execute the task
533
4.35k
        task.runnable->run();
534
535
        // Destruct the task while we do not hold the lock.
536
        //
537
        // The task's destructor may be expensive if it has a lot of bound
538
        // objects, and we don't want to block submission of the threadpool.
539
        // In the worst case, the destructor might even try to do something
540
        // with this threadpool, and produce a deadlock.
541
4.35k
        task.runnable.reset();
542
4.35k
        l.lock();
543
544
        // Possible states:
545
        // 1. The token was shut down while we ran its task. Transition to QUIESCED.
546
        // 2. The token has no more queued tasks. Transition back to IDLE.
547
        // 3. The token has more tasks. Requeue it and transition back to RUNNING.
548
4.35k
        ThreadPoolToken::State state = token->state();
549
4.35k
        DCHECK(state == ThreadPoolToken::State::RUNNING ||
550
4.35k
               state == ThreadPoolToken::State::QUIESCING);
551
4.35k
        --token->_active_threads;
552
4.35k
        --token->_num_submitted_tasks;
553
554
        // Handle shutdown and idle state transitions.
555
4.35k
        if (token->_active_threads == 0) {
556
3.10k
            if (state == ThreadPoolToken::State::QUIESCING) {
557
563
                DCHECK(token->_entries.empty());
558
563
                token->transition(ThreadPoolToken::State::QUIESCED);
559
2.53k
            } else if (token->_entries.empty()) {
560
1.57k
                token->transition(ThreadPoolToken::State::IDLE);
561
1.57k
            }
562
3.10k
        }
563
564
        // We decrement _num_submitted_tasks while holding the lock, so the following DCHECK holds.
565
4.35k
        DCHECK(token->_num_submitted_tasks < token->_max_concurrency);
566
567
        // If token->state is running and there are unsubmitted tasks in the token, we put
568
        // the token back.
569
4.35k
        if (token->_num_unsubmitted_tasks > 0 && state == ThreadPoolToken::State::RUNNING) {
570
            // SERIAL: if _entries is not empty, then _num_unsubmitted_tasks must be greater than 0.
571
            // CONCURRENT: we have to check _num_unsubmitted_tasks because two or more threads
572
            // may be running for the token.
573
787
            _queue.emplace_back(token);
574
787
            ++token->_num_submitted_tasks;
575
787
            --token->_num_unsubmitted_tasks;
576
787
        }
577
578
4.35k
        if (--_active_threads == 0) {
579
700
            _idle_cond.notify_all();
580
700
        }
581
4.35k
    }
582
583
    // It's important that we hold the lock between exiting the loop and dropping
584
    // _num_threads. Otherwise it's possible someone else could come along here
585
    // and add a new task just as the last running thread is about to exit.
586
2.24k
    CHECK(l.owns_lock());
587
588
2.24k
    CHECK_EQ(_threads.erase(Thread::current_thread()), 1);
589
2.24k
    _num_threads--;
590
2.24k
    if (_num_threads + _num_threads_pending_start == 0) {
591
700
        _no_threads_cond.notify_all();
592
593
        // Sanity check: if we're the last thread exiting, the queue ought to be
594
        // empty. Otherwise it will never get processed.
595
700
        CHECK(_queue.empty());
596
700
        DCHECK_EQ(0, _total_queued_tasks);
597
700
    }
598
2.24k
}
599
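
Aside: the ETIMEDOUT caveat noted inside dispatch_thread() generalizes to any timed condition-variable wait: after wait_for() reports a timeout, re-check the predicate under the lock before acting on it. A standalone sketch (plain C++, not Doris code):

#include <chrono>
#include <condition_variable>
#include <deque>
#include <mutex>

// Returns true if there is work to pick up, false on a genuine idle timeout.
bool wait_for_work(std::mutex& mu, std::condition_variable& cv, std::deque<int>& queue,
                   std::chrono::milliseconds idle_timeout) {
    std::unique_lock<std::mutex> guard(mu);
    if (cv.wait_for(guard, idle_timeout) == std::cv_status::timeout) {
        // A producer may have pushed and signaled just as the timeout fired,
        // so the timeout alone does not prove the queue is still empty.
        return !queue.empty();
    }
    return true;  // woken explicitly; the caller re-checks the queue anyway
}
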
600
2.24k
Status ThreadPool::create_thread() {
601
2.24k
    return Thread::create("thread pool", strings::Substitute("$0 [worker]", _name),
602
2.24k
                          &ThreadPool::dispatch_thread, this, nullptr);
603
2.24k
}
604
605
4.14k
void ThreadPool::check_not_pool_thread_unlocked() {
606
4.14k
    Thread* current = Thread::current_thread();
607
4.14k
    if (ContainsKey(_threads, current)) {
608
0
        LOG(FATAL) << strings::Substitute(
609
0
                "Thread belonging to thread pool '$0' with "
610
0
                "name '$1' called pool function that would result in deadlock",
611
0
                _name, current->name());
612
0
    }
613
4.14k
}
614
615
4
Status ThreadPool::set_min_threads(int min_threads) {
616
4
    std::lock_guard<std::mutex> l(_lock);
617
4
    if (min_threads > _max_threads) {
618
        // min_threads cannot be set greater than max_threads
619
1
        return Status::InternalError("set thread pool {} min_threads failed", _name);
620
1
    }
621
3
    _min_threads = min_threads;
622
3
    if (min_threads > _num_threads + _num_threads_pending_start) {
623
0
        int addition_threads = min_threads - _num_threads - _num_threads_pending_start;
624
0
        _num_threads_pending_start += addition_threads;
625
0
        for (int i = 0; i < addition_threads; i++) {
626
0
            Status status = create_thread();
627
0
            if (!status.ok()) {
628
0
                _num_threads_pending_start--;
629
0
                LOG(WARNING) << "Thread pool " << _name
630
0
                             << " failed to create thread: " << status.to_string();
631
0
                return status;
632
0
            }
633
0
        }
634
0
    }
635
3
    return Status::OK();
636
3
}
637
638
7
Status ThreadPool::set_max_threads(int max_threads) {
639
7
    std::lock_guard<std::mutex> l(_lock);
640
7
    if (_min_threads > max_threads) {
641
        // max_threads cannot be set less than min_threads
642
1
        return Status::InternalError("set thread pool {} max_threads failed", _name);
643
1
    }
644
645
6
    _max_threads = max_threads;
646
6
    if (_max_threads > _num_threads + _num_threads_pending_start) {
647
4
        int addition_threads = _max_threads - _num_threads - _num_threads_pending_start;
648
4
        addition_threads = std::min(addition_threads, _total_queued_tasks);
649
4
        _num_threads_pending_start += addition_threads;
650
7
        for (int i = 0; i < addition_threads; i++) {
651
3
            Status status = create_thread();
652
3
            if (!status.ok()) {
653
0
                _num_threads_pending_start--;
654
0
                LOG(WARNING) << "Thread pool " << _name
655
0
                             << " failed to create thread: " << status.to_string();
656
0
                return status;
657
0
            }
658
3
        }
659
4
    }
660
6
    return Status::OK();
661
6
}
662
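
Aside: set_min_threads() and set_max_threads() above allow resizing a live pool; when the maximum is lowered, surplus workers exit lazily via the `_num_threads + _num_threads_pending_start > _max_threads` check in dispatch_thread(). A hypothetical helper for growing a pool:

#include "util/threadpool.h"

doris::Status grow_pool(doris::ThreadPool* pool, int new_min, int new_max) {
    // When growing, raise the maximum first so that the minimum never
    // exceeds the maximum; both setters validate this under the pool lock.
    doris::Status st = pool->set_max_threads(new_max);
    if (!st.ok()) {
        return st;
    }
    return pool->set_min_threads(new_min);
}
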
663
0
std::ostream& operator<<(std::ostream& o, ThreadPoolToken::State s) {
664
0
    return o << ThreadPoolToken::state_to_string(s);
665
0
}
666
667
} // namespace doris