//===--------------------- TaskPool.h ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef utility_TaskPool_h_
#define utility_TaskPool_h_

#include "llvm/ADT/STLExtras.h"
#include <functional>
#include <future>
#include <list>
#include <memory>
#include <mutex>
#include <type_traits>

namespace lldb_private {

// Global TaskPool class for running tasks in parallel on a set of worker
// threads created the first time the task pool is used. The TaskPool provides
// no guarantee about the order in which tasks will run or about which tasks
// will run in parallel. None of the tasks added to the task pool should block
// on something (mutex, future, condition variable) that will be set only by
// the completion of another task on the task pool, as they may run
// sequentially on the same thread.
class TaskPool {
public:
  // Add a new task to the task pool and return a std::future belonging to the
  // newly created task. The caller of this function has to wait on the future
  // for this task to complete.
  template <typename F, typename... Args>
  static std::future<typename std::result_of<F(Args...)>::type>
  AddTask(F &&f, Args &&... args);
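  //
  // Example usage (an illustrative sketch, not part of the API; the lambda
  // and argument are placeholders):
  //   auto future = TaskPool::AddTask([](int n) { return n * n; }, 4);
  //   int squared = future.get(); // squared == 16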

  // Run all of the specified tasks on the task pool and wait until all of them
  // are finished before returning. This method is intended to be used for a
  // small number of tasks where listing them as function arguments is
  // acceptable. For running a large number of tasks you should use AddTask for
  // each task and then call wait() on each returned future.
  template <typename... T> static void RunTasks(T &&... tasks);
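  //
  // Example usage (an illustrative sketch; DoWorkA and DoWorkB are
  // placeholder callables, not part of this API):
  //   TaskPool::RunTasks([] { DoWorkA(); }, [] { DoWorkB(); });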

private:
  TaskPool() = delete;

  template <typename... T> struct RunTaskImpl;

  static void AddTaskImpl(std::function<void()> &&task_fn);
};

template <typename F, typename... Args>
std::future<typename std::result_of<F(Args...)>::type>
TaskPool::AddTask(F &&f, Args &&... args) {
  // Wrap the callable and its bound arguments in a std::packaged_task held by
  // a shared_ptr; packaged_task is move-only, so sharing it lets the copyable
  // lambda below be stored in a std::function<void()>.
  auto task_sp = std::make_shared<
      std::packaged_task<typename std::result_of<F(Args...)>::type()>>(
      std::bind(std::forward<F>(f), std::forward<Args>(args)...));

  AddTaskImpl([task_sp]() { (*task_sp)(); });

  return task_sp->get_future();
}

template <typename... T> void TaskPool::RunTasks(T &&... tasks) {
  RunTaskImpl<T...>::Run(std::forward<T>(tasks)...);
}

// Recursive case: queue the head task, recurse to queue the remaining tasks,
// then wait for the head task to finish before returning.
template <typename Head, typename... Tail>
struct TaskPool::RunTaskImpl<Head, Tail...> {
  static void Run(Head &&h, Tail &&... t) {
    auto f = AddTask(std::forward<Head>(h));
    RunTaskImpl<Tail...>::Run(std::forward<Tail>(t)...);
    f.wait();
  }
};

// Base case: no tasks left to run.
template <> struct TaskPool::RunTaskImpl<> {
  static void Run() {}
};

// Run 'func' on every value from begin .. end-1. Each worker will grab
// 'batch_size' numbers at a time to work on, so for very fast functions the
// batch size should be large enough to avoid too much cache line contention.
void TaskMapOverInt(size_t begin, size_t end,
                    const llvm::function_ref<void(size_t)> &func);
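//
// Example usage (an illustrative sketch; 'results' is a placeholder):
//   std::vector<int> results(100);
//   TaskMapOverInt(0, results.size(), [&results](size_t i) {
//     results[i] = static_cast<int>(i * i);
//   });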

// Return a hint for how many worker threads to use, based on the hardware
// concurrency available on the host.
unsigned GetHardwareConcurrencyHint();

} // namespace lldb_private

#endif // #ifndef utility_TaskPool_h_