//===--- Threading.cpp - Abstractions for multithreading ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "support/Threading.h"
#include "support/Trace.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/thread.h"
#include <atomic>
#include <optional>
#include <thread>
#ifdef __USE_POSIX
#include <pthread.h>
#elif defined(__APPLE__)
#include <sys/resource.h>
#elif defined(_WIN32)
#include <windows.h>
#endif

namespace clang {
namespace clangd {

void Notification::notify() {
  {
    std::lock_guard<std::mutex> Lock(Mu);
    Notified = true;
    // Broadcast with the lock held. This ensures that it's safe to destroy
    // a Notification after wait() returns, even from another thread.
    CV.notify_all();
  }
}
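
// Illustrative scenario (not from the original file) for the comment above:
// if notify_all() ran after releasing Mu, a waiter could observe Notified,
// return from wait(), and destroy the Notification (including CV) before the
// notifying thread actually called notify_all(). Broadcasting while Mu is
// held rules that interleaving out.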

bool Notification::wait(Deadline D) const {
  std::unique_lock<std::mutex> Lock(Mu);
  return clangd::wait(Lock, CV, D, [&] { return Notified; });
}

Semaphore::Semaphore(std::size_t MaxLocks) : FreeSlots(MaxLocks) {}

bool Semaphore::try_lock() {
  std::unique_lock<std::mutex> Lock(Mutex);
  if (FreeSlots > 0) {
    --FreeSlots;
    return true;
  }
  return false;
}

void Semaphore::lock() {
  trace::Span Span("WaitForFreeSemaphoreSlot");
  // trace::Span can also acquire locks in its constructor and destructor; make
  // sure that happens while the Semaphore's own lock is not held.
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    SlotsChanged.wait(Lock, [&]() { return FreeSlots > 0; });
    --FreeSlots;
  }
}

void Semaphore::unlock() {
  std::unique_lock<std::mutex> Lock(Mutex);
  ++FreeSlots;
  Lock.unlock();

  // Notify after releasing the lock; otherwise the woken thread would
  // immediately block again on the still-held mutex.
  SlotsChanged.notify_one();
}
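
// Illustrative usage (hypothetical caller, not from this file): Semaphore
// models BasicLockable, so it composes with standard lock guards to bound
// concurrency:
//   Semaphore Slots(/*MaxLocks=*/4);
//   {
//     std::lock_guard<Semaphore> Guard(Slots); // blocks while 4 are held
//     runExpensiveTask();                      // hypothetical work
//   }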

AsyncTaskRunner::~AsyncTaskRunner() { wait(); }

bool AsyncTaskRunner::wait(Deadline D) const {
  std::unique_lock<std::mutex> Lock(Mutex);
  return clangd::wait(Lock, TasksReachedZero, D,
                      [&] { return InFlightTasks == 0; });
}

void AsyncTaskRunner::runAsync(const llvm::Twine &Name,
                               llvm::unique_function<void()> Action) {
  {
    std::lock_guard<std::mutex> Lock(Mutex);
    ++InFlightTasks;
  }

  auto CleanupTask = llvm::make_scope_exit([this]() {
    std::lock_guard<std::mutex> Lock(Mutex);
    int NewTasksCnt = --InFlightTasks;
    if (NewTasksCnt == 0) {
      // Note: we can't unlock here because we don't want the object to be
      // destroyed before we notify.
      TasksReachedZero.notify_one();
    }
  });

  auto Task = [Name = Name.str(), Action = std::move(Action),
               Cleanup = std::move(CleanupTask)]() mutable {
    llvm::set_thread_name(Name);
    Action();
    // Destroy Action (and whatever it captured) before Cleanup runs and
    // signals task completion.
    Action = nullptr;
  };

  // Ensure our worker threads have big enough stacks to run clang.
  llvm::thread Thread(
      /*clang::DesiredStackSize*/ std::optional<unsigned>(8 << 20),
      std::move(Task));
  Thread.detach();
}
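
// Illustrative usage (hypothetical caller, not from this file):
//   AsyncTaskRunner Tasks;
//   Tasks.runAsync("worker:example.cpp", [] { doBackgroundWork(); });
//   Tasks.wait(); // blocks until tasks finish; ~AsyncTaskRunner() waits too.
// Here doBackgroundWork() is a stand-in for real work.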

Deadline timeoutSeconds(std::optional<double> Seconds) {
  using namespace std::chrono;
  if (!Seconds)
    return Deadline::infinity();
  return steady_clock::now() +
         duration_cast<steady_clock::duration>(duration<double>(*Seconds));
}
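
// For example: timeoutSeconds(std::nullopt) yields Deadline::infinity(), and
// timeoutSeconds(0.5) yields a point half a second from now on steady_clock.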

void wait(std::unique_lock<std::mutex> &Lock, std::condition_variable &CV,
          Deadline D) {
  if (D == Deadline::zero())
    return;
  if (D == Deadline::infinity())
    return CV.wait(Lock);
  CV.wait_until(Lock, D.time());
}

bool PeriodicThrottler::operator()() {
  Rep Now = Stopwatch::now().time_since_epoch().count();
  Rep OldNext = Next.load(std::memory_order_acquire);
  if (Now < OldNext)
    return false;
  // We're ready to run (but may be racing other threads).
  // Work out the updated target time, and run if we successfully bump it.
  Rep NewNext = Now + Period;
  return Next.compare_exchange_strong(OldNext, NewNext,
                                      std::memory_order_acq_rel);
}
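
// Illustrative usage (hypothetical caller, not from this file): when several
// threads race through this check, at most one per Period wins the
// compare_exchange and observes `true`:
//   if (Throttler())        // `Throttler` is some PeriodicThrottler instance
//     publishProgress();    // hypothetical throttled operation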

} // namespace clangd
} // namespace clang