1 | // Copyright (C) 2018 The Qt Company Ltd. |
2 | // SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only |
3 | #include "qxcbeventqueue.h" |
4 | #include "qxcbconnection.h" |
5 | |
6 | #include <QtCore/QObject> |
7 | #include <QtCore/QCoreApplication> |
8 | #include <QtCore/QAbstractEventDispatcher> |
9 | #include <QtCore/QMutex> |
10 | #include <QtCore/QDebug> |
11 | |
12 | QT_BEGIN_NAMESPACE |
13 | |
// Shutdown coordination between the reader thread and the application:
// qAppExiting guards dispatcherOwnerDestructing, which is flipped to true by a
// post routine once the Q*Application (the event dispatcher's owner) starts
// destructing. wakeUpDispatcher() checks it under the mutex so the reader
// thread never touches a dispatcher that is being torn down.
Q_CONSTINIT static QBasicMutex qAppExiting;
Q_CONSTINIT static bool dispatcherOwnerDestructing = false;
16 | |
17 | /*! |
18 | \class QXcbEventQueue |
19 | \internal |
20 | |
21 | Lock-free event passing: |
22 | |
23 | The lock-free solution uses a singly-linked list to pass events from the |
24 | reader thread to the main thread. An atomic operation is used to sync the |
25 | tail node of the list between threads. The reader thread takes special care |
26 | when accessing the tail node. It does not dequeue the last node and does not |
27 | access (read or write) the tail node's 'next' member. This lets the reader |
28 | add more items at the same time as the main thread is dequeuing nodes from |
29 | the head. A custom linked list implementation is used, because std::list |
30 | does not have any thread-safety guarantees. The custom list is |
31 | lightweight - no reference counting, back links, etc. |
32 | |
33 | Memory management: |
34 | |
    In a normally functioning application, the XCB plugin won't buffer more
    than a few batches of events, with a couple of events per batch. Instead
    of constantly calling new / delete, we can create a pool of nodes that we
    reuse. The main thread uses an atomic operation to sync how many nodes
    have been restored (available for reuse). If at some point a user
    application blocks the main thread for a long time, we might run out of
    nodes in the pool. Then we create nodes on the heap. These will be
    automatically "garbage collected" out of the linked list, once the main
    thread stops blocking.
43 | */ |
44 | |
45 | QXcbEventQueue::QXcbEventQueue(QXcbConnection *connection) |
46 | : m_connection(connection) |
47 | { |
48 | // When running test cases in auto tests, static variables are preserved |
49 | // between test function runs, even if Q*Application object is destroyed. |
50 | // Reset to default value to account for this. |
51 | dispatcherOwnerDestructing = false; |
52 | qAddPostRoutine([]() { |
53 | QMutexLocker locker(&qAppExiting); |
54 | dispatcherOwnerDestructing = true; |
55 | }); |
56 | |
57 | // Lets init the list with one node, so we don't have to check for |
58 | // this special case in various places. |
59 | m_head = m_flushedTail = qXcbEventNodeFactory(event: nullptr); |
60 | m_tail.store(p: m_head, m: std::memory_order_release); |
61 | |
62 | start(); |
63 | } |
64 | |
65 | QXcbEventQueue::~QXcbEventQueue() |
66 | { |
67 | if (isRunning()) { |
68 | sendCloseConnectionEvent(); |
69 | wait(); |
70 | } |
71 | |
72 | flushBufferedEvents(); |
73 | while (xcb_generic_event_t *event = takeFirst(flags: QEventLoop::AllEvents)) |
74 | free(ptr: event); |
75 | |
76 | if (m_head && m_head->fromHeap) |
77 | delete m_head; // the deferred node |
78 | |
79 | qCDebug(lcQpaEventReader) << "nodes on heap:" << m_nodesOnHeap; |
80 | } |
81 | |
82 | xcb_generic_event_t *QXcbEventQueue::takeFirst(QEventLoop::ProcessEventsFlags flags) |
83 | { |
84 | // This is the level at which we were moving excluded user input events into |
85 | // separate queue in Qt 4 (see qeventdispatcher_x11.cpp). In this case |
86 | // QXcbEventQueue represents Xlib's internal event queue. In Qt 4, Xlib's |
87 | // event queue peeking APIs would not see these events anymore, the same way |
88 | // our peeking functions do not consider m_inputEvents. This design is |
89 | // intentional to keep the same behavior. We could do filtering directly on |
90 | // QXcbEventQueue, without the m_inputEvents, but it is not clear if it is |
91 | // needed by anyone who peeks at the native event queue. |
92 | |
93 | bool excludeUserInputEvents = flags.testFlag(flag: QEventLoop::ExcludeUserInputEvents); |
94 | if (excludeUserInputEvents) { |
95 | xcb_generic_event_t *event = nullptr; |
96 | while ((event = takeFirst())) { |
97 | if (m_connection->isUserInputEvent(event)) { |
98 | m_inputEvents << event; |
99 | continue; |
100 | } |
101 | break; |
102 | } |
103 | return event; |
104 | } |
105 | |
106 | if (!m_inputEvents.isEmpty()) |
107 | return m_inputEvents.takeFirst(); |
108 | return takeFirst(); |
109 | } |
110 | |
// Dequeues the next event from the flushed portion of the queue, or returns
// nullptr if it is empty. Runs on the main thread only. The node at
// m_flushedTail is never dequeued here (only its event pointer is cleared):
// the reader thread may still be appending behind it, so removal of that node
// is deferred until a later flush moves m_flushedTail forward.
xcb_generic_event_t *QXcbEventQueue::takeFirst()
{
    if (isEmpty())
        return nullptr;

    xcb_generic_event_t *event = nullptr;
    do {
        event = m_head->event;
        if (m_head == m_flushedTail) {
            // defer dequeuing until next successful flush of events
            if (event) // check if not cleared already by some filter
                m_head->event = nullptr; // if not, clear it
        } else {
            dequeueNode();
            if (!event)
                continue; // consumed by filter or deferred node
        }
    } while (!isEmpty() && !event);

    // The list changed shape, so any peeker's cached position is now invalid.
    m_queueModified = m_peekerIndexCacheDirty = true;

    return event;
}
134 | |
135 | void QXcbEventQueue::dequeueNode() |
136 | { |
137 | QXcbEventNode *node = m_head; |
138 | m_head = m_head->next; |
139 | if (node->fromHeap) |
140 | delete node; |
141 | else |
142 | m_nodesRestored.fetch_add(i: 1, m: std::memory_order_release); |
143 | } |
144 | |
145 | void QXcbEventQueue::flushBufferedEvents() |
146 | { |
147 | m_flushedTail = m_tail.load(m: std::memory_order_acquire); |
148 | } |
149 | |
150 | QXcbEventNode *QXcbEventQueue::qXcbEventNodeFactory(xcb_generic_event_t *event) |
151 | { |
152 | static QXcbEventNode qXcbNodePool[PoolSize]; |
153 | |
154 | if (m_freeNodes == 0) // out of nodes, check if the main thread has released any |
155 | m_freeNodes = m_nodesRestored.exchange(i: 0, m: std::memory_order_acquire); |
156 | |
157 | if (m_freeNodes) { |
158 | m_freeNodes--; |
159 | if (m_poolIndex == PoolSize) { |
160 | // wrap back to the beginning, we always take and restore nodes in-order |
161 | m_poolIndex = 0; |
162 | } |
163 | QXcbEventNode *node = &qXcbNodePool[m_poolIndex++]; |
164 | node->event = event; |
165 | node->next = nullptr; |
166 | return node; |
167 | } |
168 | |
169 | // the main thread is not flushing events and thus the pool has become empty |
170 | auto node = new QXcbEventNode(event); |
171 | node->fromHeap = true; |
172 | qCDebug(lcQpaEventReader) << "[heap] " << m_nodesOnHeap++; |
173 | return node; |
174 | } |
175 | |
176 | void QXcbEventQueue::run() |
177 | { |
178 | xcb_generic_event_t *event = nullptr; |
179 | xcb_connection_t *connection = m_connection->xcb_connection(); |
180 | QXcbEventNode *tail = m_head; |
181 | |
182 | auto enqueueEvent = [&tail, this](xcb_generic_event_t *event) { |
183 | if (!isCloseConnectionEvent(event)) { |
184 | tail->next = qXcbEventNodeFactory(event); |
185 | tail = tail->next; |
186 | m_tail.store(p: tail, m: std::memory_order_release); |
187 | } else { |
188 | free(ptr: event); |
189 | } |
190 | }; |
191 | |
192 | while (!m_closeConnectionDetected && (event = xcb_wait_for_event(c: connection))) { |
193 | // This lock can block only if there are users of waitForNewEvents(). |
194 | // Currently only the clipboard implementation relies on it. |
195 | m_newEventsMutex.lock(); |
196 | enqueueEvent(event); |
197 | while (!m_closeConnectionDetected && (event = xcb_poll_for_queued_event(c: connection))) |
198 | enqueueEvent(event); |
199 | |
200 | m_newEventsCondition.wakeOne(); |
201 | m_newEventsMutex.unlock(); |
202 | wakeUpDispatcher(); |
203 | } |
204 | |
205 | if (!m_closeConnectionDetected) { |
206 | // Connection was terminated not by us. Wake up dispatcher, which will |
207 | // call processXcbEvents(), where we handle the connection errors via |
208 | // xcb_connection_has_error(). |
209 | wakeUpDispatcher(); |
210 | } |
211 | } |
212 | |
213 | void QXcbEventQueue::wakeUpDispatcher() |
214 | { |
215 | QMutexLocker locker(&qAppExiting); |
216 | if (!dispatcherOwnerDestructing) { |
217 | // This thread can run before a dispatcher has been created, |
218 | // so check if it is ready. |
219 | if (QCoreApplication::eventDispatcher()) |
220 | QCoreApplication::eventDispatcher()->wakeUp(); |
221 | } |
222 | } |
223 | |
224 | qint32 QXcbEventQueue::generatePeekerId() |
225 | { |
226 | const qint32 peekerId = m_peekerIdSource++; |
227 | m_peekerToNode.insert(key: peekerId, value: nullptr); |
228 | return peekerId; |
229 | } |
230 | |
231 | bool QXcbEventQueue::removePeekerId(qint32 peekerId) |
232 | { |
233 | const auto it = m_peekerToNode.constFind(key: peekerId); |
234 | if (it == m_peekerToNode.constEnd()) { |
235 | qCWarning(lcQpaXcb, "failed to remove unknown peeker id: %d" , peekerId); |
236 | return false; |
237 | } |
238 | m_peekerToNode.erase(it); |
239 | if (m_peekerToNode.isEmpty()) { |
240 | m_peekerIdSource = 0; // Once the hash becomes empty, we can start reusing IDs |
241 | m_peekerIndexCacheDirty = false; |
242 | } |
243 | return true; |
244 | } |
245 | |
246 | bool QXcbEventQueue::peekEventQueue(PeekerCallback peeker, void *peekerData, |
247 | PeekOptions option, qint32 peekerId) |
248 | { |
249 | const bool peekerIdProvided = peekerId != -1; |
250 | auto peekerToNodeIt = m_peekerToNode.find(key: peekerId); |
251 | |
252 | if (peekerIdProvided && peekerToNodeIt == m_peekerToNode.end()) { |
253 | qCWarning(lcQpaXcb, "failed to find index for unknown peeker id: %d" , peekerId); |
254 | return false; |
255 | } |
256 | |
257 | const bool useCache = option.testFlag(flag: PeekOption::PeekFromCachedIndex); |
258 | if (useCache && !peekerIdProvided) { |
259 | qCWarning(lcQpaXcb, "PeekOption::PeekFromCachedIndex requires peeker id" ); |
260 | return false; |
261 | } |
262 | |
263 | if (peekerIdProvided && m_peekerIndexCacheDirty) { |
264 | for (auto &node : m_peekerToNode) // reset cache |
265 | node = nullptr; |
266 | m_peekerIndexCacheDirty = false; |
267 | } |
268 | |
269 | flushBufferedEvents(); |
270 | if (isEmpty()) |
271 | return false; |
272 | |
273 | const auto startNode = [this, useCache, peekerToNodeIt]() -> QXcbEventNode * { |
274 | if (useCache) { |
275 | const QXcbEventNode *cachedNode = peekerToNodeIt.value(); |
276 | if (!cachedNode) |
277 | return m_head; // cache was reset |
278 | if (cachedNode == m_flushedTail) |
279 | return nullptr; // no new events since the last call |
280 | return cachedNode->next; |
281 | } |
282 | return m_head; |
283 | }(); |
284 | |
285 | if (!startNode) |
286 | return false; |
287 | |
288 | // A peeker may call QCoreApplication::processEvents(), which will cause |
289 | // QXcbConnection::processXcbEvents() to modify the queue we are currently |
290 | // looping through; |
291 | m_queueModified = false; |
292 | bool result = false; |
293 | |
294 | QXcbEventNode *node = startNode; |
295 | do { |
296 | xcb_generic_event_t *event = node->event; |
297 | if (event && peeker(event, peekerData)) { |
298 | result = true; |
299 | break; |
300 | } |
301 | if (node == m_flushedTail) |
302 | break; |
303 | node = node->next; |
304 | } while (!m_queueModified); |
305 | |
306 | // Update the cached index if the queue was not modified, and hence the |
307 | // cache is still valid. |
308 | if (peekerIdProvided && node != startNode && !m_queueModified) { |
309 | // Before updating, make sure that a peeker callback did not remove |
310 | // the peeker id. |
311 | peekerToNodeIt = m_peekerToNode.find(key: peekerId); |
312 | if (peekerToNodeIt != m_peekerToNode.end()) |
313 | *peekerToNodeIt = node; // id still in the cache, update node |
314 | } |
315 | |
316 | return result; |
317 | } |
318 | |
319 | void QXcbEventQueue::waitForNewEvents(const QXcbEventNode *sinceFlushedTail, |
320 | unsigned long time) |
321 | { |
322 | QMutexLocker locker(&m_newEventsMutex); |
323 | flushBufferedEvents(); |
324 | if (sinceFlushedTail != m_flushedTail) |
325 | return; |
326 | m_newEventsCondition.wait(lockedMutex: &m_newEventsMutex, time); |
327 | } |
328 | |
329 | void QXcbEventQueue::sendCloseConnectionEvent() const |
330 | { |
331 | // A hack to close XCB connection. Apparently XCB does not have any APIs for this? |
332 | xcb_client_message_event_t event; |
333 | memset(s: &event, c: 0, n: sizeof(event)); |
334 | |
335 | xcb_connection_t *c = m_connection->xcb_connection(); |
336 | const xcb_window_t window = xcb_generate_id(c); |
337 | xcb_screen_iterator_t it = xcb_setup_roots_iterator(R: m_connection->setup()); |
338 | xcb_screen_t *screen = it.data; |
339 | xcb_create_window(c, XCB_COPY_FROM_PARENT, |
340 | wid: window, parent: screen->root, |
341 | x: 0, y: 0, width: 1, height: 1, border_width: 0, class: XCB_WINDOW_CLASS_INPUT_ONLY, |
342 | visual: screen->root_visual, value_mask: 0, value_list: nullptr); |
343 | |
344 | event.response_type = XCB_CLIENT_MESSAGE; |
345 | event.format = 32; |
346 | event.sequence = 0; |
347 | event.window = window; |
348 | event.type = m_connection->atom(qatom: QXcbAtom::Atom_QT_CLOSE_CONNECTION); |
349 | event.data.data32[0] = 0; |
350 | |
351 | xcb_send_event(c, propagate: false, destination: window, event_mask: XCB_EVENT_MASK_NO_EVENT, event: reinterpret_cast<const char *>(&event)); |
352 | xcb_destroy_window(c, window); |
353 | xcb_flush(c); |
354 | } |
355 | |
356 | bool QXcbEventQueue::isCloseConnectionEvent(const xcb_generic_event_t *event) |
357 | { |
358 | if (event && (event->response_type & ~0x80) == XCB_CLIENT_MESSAGE) { |
359 | auto clientMessage = reinterpret_cast<const xcb_client_message_event_t *>(event); |
360 | if (clientMessage->type == m_connection->atom(qatom: QXcbAtom::Atom_QT_CLOSE_CONNECTION)) |
361 | m_closeConnectionDetected = true; |
362 | } |
363 | return m_closeConnectionDetected; |
364 | } |
365 | |
366 | QT_END_NAMESPACE |
367 | |
368 | #include "moc_qxcbeventqueue.cpp" |
369 | |