use std::any::Any;
use std::collections::VecDeque;
use std::convert::Infallible;
use std::marker::PhantomData;
use std::os::unix::io::{AsFd, BorrowedFd, OwnedFd};
use std::sync::{atomic::Ordering, Arc, Condvar, Mutex};
use std::task;

use wayland_backend::{
    client::{Backend, ObjectData, ObjectId, ReadEventsGuard, WaylandError},
    protocol::{Argument, Message},
};

use crate::{conn::SyncData, Connection, DispatchError, Proxy};

/// A trait for handlers of proxies' events delivered to an [`EventQueue`].
///
/// ## General usage
///
/// You need to implement this trait on your `State` for every type of Wayland object that will be processed
/// by the [`EventQueue`] working with your `State`.
///
/// You can have different implementations of the trait for the same interface but different `UserData` types.
/// This way the events for a given object will be processed by the appropriate implementation depending on
/// which `UserData` was assigned to it at creation.
///
/// The way this trait works is that the [`Dispatch::event()`] method will be invoked by the event queue for
/// every event received by an object associated with this event queue. Your implementation can then match on
/// the associated [`Proxy::Event`] enum and do any processing needed with that event.
///
/// In the rare case of an interface with *events* creating new objects (in the core protocol, the only
/// instance of this is the `wl_data_device.data_offer` event), you'll need to implement the
/// [`Dispatch::event_created_child()`] method. See the [`event_created_child!`](macro.event_created_child.html) macro
/// for a simple way to do this.
///
/// ## Modularity
///
/// To provide generic handlers for downstream usage, it is possible to make an implementation of the trait
/// that is generic over the last type argument, as illustrated below. Users will then be able to
/// automatically delegate their implementation to yours using the [`delegate_dispatch!`] macro.
///
/// As a result, when your implementation is instantiated, the last type parameter `State` will be the state
/// struct of the app using your generic implementation. You can put additional trait constraints on it to
/// specify an interface between your module and downstream code, as illustrated in this example:
///
/// ```
/// # // Maintainers: If this example changes, please make sure you also carry those changes over to the delegate_dispatch macro.
/// use wayland_client::{protocol::wl_registry, Dispatch};
///
/// /// The type we want to delegate to
/// struct DelegateToMe;
///
/// /// The user data relevant for your implementation.
/// /// When providing a delegate implementation, it is recommended to use your own type here, even if it is
/// /// just a unit struct: using () would cause a risk of clashing with another such implementation.
/// struct MyUserData;
///
/// // Now a generic implementation of Dispatch, we are generic over the last type argument instead of using
/// // the default State=Self.
/// impl<State> Dispatch<wl_registry::WlRegistry, MyUserData, State> for DelegateToMe
/// where
///     // State is the type which has delegated to this type, so it needs to have an impl of Dispatch itself
///     State: Dispatch<wl_registry::WlRegistry, MyUserData>,
///     // If your delegate type has some internal state, it'll need to access it, and you can
///     // require it by adding custom trait bounds.
///     // In this example, we just require an AsMut implementation
///     State: AsMut<DelegateToMe>,
/// {
///     fn event(
///         state: &mut State,
///         _proxy: &wl_registry::WlRegistry,
///         _event: wl_registry::Event,
///         _udata: &MyUserData,
///         _conn: &wayland_client::Connection,
///         _qhandle: &wayland_client::QueueHandle<State>,
///     ) {
///         // Here the delegate may handle incoming events as it pleases.
///
///         // For example, it retrieves its state and does some processing with it
///         let me: &mut DelegateToMe = state.as_mut();
///         // do something with `me` ...
/// #       std::mem::drop(me) // use `me` to avoid a warning
///     }
/// }
/// ```
///
/// **Note:** Due to limitations in Rust's trait resolution algorithm, a type providing a generic
/// implementation of [`Dispatch`] cannot be used directly as the dispatching state, as rustc
/// currently fails to understand that it also provides `Dispatch<I, U, Self>` (assuming all other
/// trait bounds are respected as well).
///
/// [`delegate_dispatch!`]: crate::delegate_dispatch
pub trait Dispatch<I, UserData, State = Self>
where
    Self: Sized,
    I: Proxy,
    State: Dispatch<I, UserData, State>,
{
    /// Called when an event from the server is processed
    ///
    /// This method contains your logic for processing events, which can vary wildly from one object to
    /// another. You are given as arguments:
    ///
    /// - a proxy representing the object that received this event
    /// - the event itself as the [`Proxy::Event`] enum (which you'll need to match against)
    /// - a reference to the `UserData` that was associated with that object on creation
    /// - a reference to the [`Connection`] in case you need to access it
    /// - a reference to a [`QueueHandle`] associated with the [`EventQueue`] currently processing events, in
    ///   case you need to create new objects that you want associated to the same [`EventQueue`].
    fn event(
        state: &mut State,
        proxy: &I,
        event: I::Event,
        data: &UserData,
        conn: &Connection,
        qhandle: &QueueHandle<State>,
    );

    /// Method used to initialize the user-data of objects created by events
    ///
    /// If the interface does not have any such event, you can ignore it. If it does, the
    /// [`event_created_child!`](macro.event_created_child.html) macro is provided for overriding it.
    #[cfg_attr(coverage, coverage(off))]
    fn event_created_child(opcode: u16, _qhandle: &QueueHandle<State>) -> Arc<dyn ObjectData> {
        panic!(
            "Missing event_created_child specialization for event opcode {} of {}",
            opcode,
            I::interface().name
        );
    }
}

/// Macro used to override [`Dispatch::event_created_child()`]
///
/// Use this macro inside the [`Dispatch`] implementation to override this method, to implement the
/// initialization of the user data for event-created objects. The usage syntax is as follows:
///
/// ```ignore
/// impl Dispatch<WlFoo, FooUserData> for MyState {
///     fn event(
///         &mut self,
///         proxy: &WlFoo,
///         event: FooEvent,
///         data: &FooUserData,
///         connhandle: &mut ConnectionHandle,
///         qhandle: &QueueHandle<MyState>
///     ) {
///         /* ... */
///     }
///
///     event_created_child!(MyState, WlFoo, [
///         // there can be multiple lines if this interface has multiple object-creating events
///         EVT_CREATE_BAR => (WlBar, BarUserData::new()),
///         // ~~~~~~~~~~~~~~    ~~~~~  ~~~~~~~~~~~~~~~~~~
///         //        |            |             |
///         //        |            |             +-- an expression whose evaluation produces the
///         //        |            |                 user data value
///         //        |            +-- the type of the newly created object
///         //        +-- the opcode of the event that creates a new object, constants for those are
///         //            generated alongside the `WlFoo` type in the `wl_foo` module
///     ]);
/// }
/// ```
#[macro_export]
macro_rules! event_created_child {
    // Must match `pat` to allow paths `wl_data_device::EVT_DONE_OPCODE` and expressions `0` to both work.
    ($(@< $( $lt:tt $( : $clt:tt $(+ $dlt:tt )* )? ),+ >)? $selftype:ty, $iface:ty, [$($opcode:pat => ($child_iface:ty, $child_udata:expr)),* $(,)?]) => {
        fn event_created_child(
            opcode: u16,
            qhandle: &$crate::QueueHandle<$selftype>
        ) -> std::sync::Arc<dyn $crate::backend::ObjectData> {
            match opcode {
                $(
                    $opcode => {
                        qhandle.make_data::<$child_iface, _>({$child_udata})
                    },
                )*
                _ => {
                    panic!("Missing event_created_child specialization for event opcode {} of {}", opcode, <$iface as $crate::Proxy>::interface().name);
                },
            }
        }
    };
}

type QueueCallback<State> = fn(
    &Connection,
    Message<ObjectId, OwnedFd>,
    &mut State,
    Arc<dyn ObjectData>,
    &QueueHandle<State>,
) -> Result<(), DispatchError>;

struct QueueEvent<State>(QueueCallback<State>, Message<ObjectId, OwnedFd>, Arc<dyn ObjectData>);

impl<State> std::fmt::Debug for QueueEvent<State> {
    #[cfg_attr(coverage, coverage(off))]
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("QueueEvent").field("msg", &self.1).finish_non_exhaustive()
    }
}

/// An event queue
///
/// This is an abstraction for handling event dispatching that gives your event handlers access to
/// some common state `&mut State`.
///
/// Event queues are created through [`Connection::new_event_queue()`](crate::Connection::new_event_queue).
///
/// Upon creation, a Wayland object is assigned to an event queue by passing the associated [`QueueHandle`]
/// as argument to the method creating it. All events received by that object will be processed by that event
/// queue, when [`dispatch_pending()`](EventQueue::dispatch_pending) or
/// [`blocking_dispatch()`](EventQueue::blocking_dispatch) is invoked.
///
/// ## Usage
///
/// ### Single queue app
///
/// If your app is simple enough that the only source of events to process is the Wayland socket and you only
/// need a single event queue, your main loop can be as simple as this:
///
/// ```rust,no_run
/// use wayland_client::Connection;
///
/// let connection = Connection::connect_to_env().unwrap();
/// let mut event_queue = connection.new_event_queue();
///
/// /*
///  * Here your initial setup
///  */
/// # struct State {
/// #     exit: bool
/// # }
/// # let mut state = State { exit: false };
///
/// // And the main loop:
/// while !state.exit {
///     event_queue.blocking_dispatch(&mut state).unwrap();
/// }
/// ```
///
/// [`blocking_dispatch()`](EventQueue::blocking_dispatch) will wait (by putting the thread to sleep)
/// until there are some events from the server that can be processed, and all your actual app logic can be
/// done in the callbacks of the [`Dispatch`] implementations, and in the main `loop` after the
/// `blocking_dispatch()` call.
///
/// ### Multi-thread multi-queue app
///
/// In a case where your app is multithreaded and you want to process events in multiple threads, a simple
/// pattern is to have one [`EventQueue`] per thread processing Wayland events.
///
/// With this pattern, each thread can use [`EventQueue::blocking_dispatch()`](EventQueue::blocking_dispatch)
/// on its own event loop, and everything will "Just Work", as sketched below.
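///
/// A minimal sketch of this pattern (the `ThreadState` type and its fields are hypothetical, and
/// assumed to come with whatever [`Dispatch`] implementations the thread needs):
///
/// ```rust,no_run
/// use wayland_client::Connection;
///
/// let connection = Connection::connect_to_env().unwrap();
///
/// # struct ThreadState { exit: bool }
/// // The connection is cheaply cloneable and can be moved to another thread.
/// let conn = connection.clone();
/// std::thread::spawn(move || {
///     // This thread gets its own event queue and its own state...
///     let mut event_queue = conn.new_event_queue();
///     let mut state = ThreadState { exit: false };
///     // ...and runs its own dispatch loop.
///     while !state.exit {
///         event_queue.blocking_dispatch(&mut state).unwrap();
///     }
/// });
/// ```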
///
/// ### Single-queue guest library
///
/// If your code is some library code that will act on a Wayland connection shared by the main program, it is
/// likely you should not trigger socket reads yourself and instead let the main app take care of it. In this
/// case, to ensure your [`EventQueue`] still makes progress, you should regularly invoke
/// [`EventQueue::dispatch_pending()`](EventQueue::dispatch_pending), which will process the events that were
/// enqueued in the inner buffer of your [`EventQueue`] by the main app reading the socket, as in the sketch below.
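///
/// A minimal sketch of such a library entry point, assuming the host application drives the socket
/// reads and calls this function regularly (the `LibraryState` type and the `on_tick` name are
/// hypothetical):
///
/// ```rust,no_run
/// use wayland_client::EventQueue;
///
/// # struct LibraryState;
/// fn on_tick(queue: &mut EventQueue<LibraryState>, state: &mut LibraryState) {
///     // Process events already enqueued for this queue; this does not read the socket
///     // itself and thus never blocks waiting for the server.
///     queue.dispatch_pending(state).unwrap();
/// }
/// ```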
///
/// ### Integrating the event queue with other sources of events
///
/// If your program needs to monitor other sources of events alongside the Wayland socket using a monitoring
/// system like `epoll`, you can integrate the Wayland socket into this system. This is done with the help
/// of the [`EventQueue::prepare_read()`](EventQueue::prepare_read) method. Your event loop will be a bit more
/// explicit:
///
/// ```rust,no_run
/// # use wayland_client::Connection;
/// # let connection = Connection::connect_to_env().unwrap();
/// # let mut event_queue = connection.new_event_queue();
/// # let mut state = ();
///
/// loop {
///     // flush the outgoing buffers to ensure that the server does receive the messages
///     // you've sent
///     event_queue.flush().unwrap();
///
///     // (this step is only relevant if other threads might be reading the socket as well)
///     // make sure no events are pending in the event queue: they might have been
///     // enqueued by other threads reading the socket
///     event_queue.dispatch_pending(&mut state).unwrap();
///
///     // This puts in place some internal synchronization to prepare for the fact that
///     // you're going to wait for events on the socket and read them, in case other threads
///     // are doing the same thing
///     let read_guard = event_queue.prepare_read().unwrap();
///
///     /*
///      * At this point you can invoke epoll(..) to wait for readiness on the multiple FDs you
///      * are working with, and read_guard.connection_fd() will give you the FD to wait on for
///      * the Wayland connection
///      */
/// # let wayland_socket_ready = true;
///
///     if wayland_socket_ready {
///         // If epoll notified readiness of the Wayland socket, you can now proceed to the read
///         read_guard.read().unwrap();
///         // And now, you must invoke dispatch_pending() to actually process the events
///         event_queue.dispatch_pending(&mut state).unwrap();
///     } else {
///         // otherwise, some of your other FDs are ready, but you didn't receive Wayland events,
///         // you can drop the guard to cancel the read preparation
///         std::mem::drop(read_guard);
///     }
///
///     /*
///      * There you process all relevant events from your other event sources
///      */
/// }
/// ```
pub struct EventQueue<State> {
    handle: QueueHandle<State>,
    conn: Connection,
}

#[derive(Debug)]
pub(crate) struct EventQueueInner<State> {
    queue: VecDeque<QueueEvent<State>>,
    freeze_count: usize,
    waker: Option<task::Waker>,
}

impl<State> EventQueueInner<State> {
    pub(crate) fn enqueue_event<I, U>(
        &mut self,
        msg: Message<ObjectId, OwnedFd>,
        odata: Arc<dyn ObjectData>,
    ) where
        State: Dispatch<I, U> + 'static,
        U: Send + Sync + 'static,
        I: Proxy + 'static,
    {
        let func = queue_callback::<I, U, State>;
        self.queue.push_back(QueueEvent(func, msg, odata));
        if self.freeze_count == 0 {
            if let Some(waker) = self.waker.take() {
                waker.wake();
            }
        }
    }
}

impl<State> std::fmt::Debug for EventQueue<State> {
    #[cfg_attr(coverage, coverage(off))]
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("EventQueue").field("handle", &self.handle).finish_non_exhaustive()
    }
}

impl<State> AsFd for EventQueue<State> {
    /// Provides the fd from [`Backend::poll_fd`] for polling.
    fn as_fd(&self) -> BorrowedFd<'_> {
        self.conn.as_fd()
    }
}

impl<State> EventQueue<State> {
    pub(crate) fn new(conn: Connection) -> Self {
        let inner = Arc::new(Mutex::new(EventQueueInner {
            queue: VecDeque::new(),
            freeze_count: 0,
            waker: None,
        }));
        Self { handle: QueueHandle { inner }, conn }
    }

    /// Get a [`QueueHandle`] for this event queue
    pub fn handle(&self) -> QueueHandle<State> {
        self.handle.clone()
    }

    /// Dispatch pending events
    ///
    /// Events are accumulated in the event queue's internal buffer when the Wayland socket is read using
    /// the read APIs on [`Connection`](crate::Connection), or when reading is done from another thread.
    /// This method will dispatch all such pending events by sequentially invoking their associated handlers:
    /// the [`Dispatch`](crate::Dispatch) implementations on the provided `&mut State`.
    ///
    /// Note: this may block if another thread has frozen the queue.
    pub fn dispatch_pending(&mut self, data: &mut State) -> Result<usize, DispatchError> {
        Self::dispatching_impl(&self.conn, &self.handle, data)
    }

    /// Block waiting for events and dispatch them
    ///
    /// This method is similar to [`dispatch_pending`](EventQueue::dispatch_pending), but if there are no
    /// pending events it will also flush the connection and block waiting for the Wayland server to send an
    /// event.
    ///
    /// A simple app event loop can consist of invoking this method in a loop.
    pub fn blocking_dispatch(&mut self, data: &mut State) -> Result<usize, DispatchError> {
        let dispatched = self.dispatch_pending(data)?;
        if dispatched > 0 {
            return Ok(dispatched);
        }

        self.conn.flush()?;

        if let Some(guard) = self.conn.prepare_read() {
            crate::conn::blocking_read(guard)?;
        }

        self.dispatch_pending(data)
    }

    /// Synchronous roundtrip
    ///
    /// This function will cause a synchronous round trip with the Wayland server. It will block until all
    /// requests in the queue have been sent and processed by the server.
    ///
    /// This is useful during the initial setup of your app, or whenever you need to guarantee that all
    /// requests made prior to calling it have been completed.
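    ///
    /// A minimal sketch of calling it (assuming an `event_queue` and a `state` value already set up
    /// for dispatching):
    ///
    /// ```rust,no_run
    /// # use wayland_client::Connection;
    /// # let connection = Connection::connect_to_env().unwrap();
    /// # let mut event_queue = connection.new_event_queue();
    /// # let mut state = ();
    /// // Block until the server has processed every request sent so far
    /// // (for example after binding your globals during startup).
    /// event_queue.roundtrip(&mut state).unwrap();
    /// ```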
    pub fn roundtrip(&mut self, data: &mut State) -> Result<usize, DispatchError> {
        let done = Arc::new(SyncData::default());

        let display = self.conn.display();
        self.conn
            .send_request(
                &display,
                crate::protocol::wl_display::Request::Sync {},
                Some(done.clone()),
            )
            .map_err(|_| WaylandError::Io(rustix::io::Errno::PIPE.into()))?;

        let mut dispatched = 0;

        while !done.done.load(Ordering::Relaxed) {
            dispatched += self.blocking_dispatch(data)?;
        }

        Ok(dispatched)
    }

    /// Start a synchronized read from the socket
    ///
    /// This is needed if you plan to wait on readiness of the Wayland socket using an event
    /// loop. See the [`EventQueue`] and [`ReadEventsGuard`] docs for details. Once the events are received,
    /// you'll then need to dispatch them from the event queue using
    /// [`EventQueue::dispatch_pending()`](EventQueue::dispatch_pending).
    ///
    /// If this method returns `None`, you should invoke [`dispatch_pending()`](EventQueue::dispatch_pending)
    /// before trying to invoke it again.
    ///
    /// If you don't need to manage multiple event sources, see
    /// [`blocking_dispatch()`](EventQueue::blocking_dispatch) for a simpler mechanism.
    ///
    /// This method is identical to [`Connection::prepare_read()`].
    #[must_use]
    pub fn prepare_read(&self) -> Option<ReadEventsGuard> {
        self.conn.prepare_read()
    }

    /// Flush pending outgoing events to the server
    ///
    /// This needs to be done regularly to ensure the server receives all your requests.
    ///
    /// This method is identical to [`Connection::flush()`].
    pub fn flush(&self) -> Result<(), WaylandError> {
        self.conn.flush()
    }

    fn dispatching_impl(
        backend: &Connection,
        qhandle: &QueueHandle<State>,
        data: &mut State,
    ) -> Result<usize, DispatchError> {
        // This call will most of the time do nothing, but it ensures that if the Connection is a guest
        // on some external connection, invoking only `EventQueue::dispatch_pending()` is enough to
        // process the events, assuming the host program already takes care of reading the socket.
        //
        // We purposefully ignore the possible error, as that would make us early return in a way that might
        // lose events, and the potential socket error will be caught in other places anyway.
        let mut dispatched = backend.backend.dispatch_inner_queue().unwrap_or_default();

        while let Some(QueueEvent(cb, msg, odata)) = Self::try_next(&qhandle.inner) {
            cb(backend, msg, data, odata, qhandle)?;
            dispatched += 1;
        }
        Ok(dispatched)
    }

    fn try_next(inner: &Mutex<EventQueueInner<State>>) -> Option<QueueEvent<State>> {
        let mut lock = inner.lock().unwrap();
        if lock.freeze_count != 0 && !lock.queue.is_empty() {
            let waker = Arc::new(DispatchWaker { cond: Condvar::new() });
            while lock.freeze_count != 0 {
                lock.waker = Some(waker.clone().into());
                lock = waker.cond.wait(lock).unwrap();
            }
        }
        lock.queue.pop_front()
    }

    /// Attempt to dispatch events from this queue, registering the current task for wakeup if no
    /// events are pending.
    ///
    /// This method is similar to [`dispatch_pending`](EventQueue::dispatch_pending); it will not
    /// perform reads on the Wayland socket. Reads on the socket by other tasks or threads will
    /// cause the current task to wake up if events are pending on this queue.
    ///
    /// ```
    /// use futures_channel::mpsc::Receiver;
    /// use futures_util::future::{poll_fn, select};
    /// use futures_util::stream::StreamExt;
    /// use wayland_client::EventQueue;
    ///
    /// struct Data;
    ///
    /// enum AppEvent {
    ///     SomethingHappened(u32),
    /// }
    ///
    /// impl Data {
    ///     fn handle(&mut self, event: AppEvent) {
    ///         // actual event handling goes here
    ///     }
    /// }
    ///
    /// // An async task that is spawned on an executor in order to handle events that need access
    /// // to a specific data object.
    /// async fn run(data: &mut Data, mut wl_queue: EventQueue<Data>, mut app_queue: Receiver<AppEvent>)
    ///     -> Result<(), Box<dyn std::error::Error>>
    /// {
    ///     use futures_util::future::Either;
    ///     loop {
    ///         match select(
    ///             poll_fn(|cx| wl_queue.poll_dispatch_pending(cx, data)),
    ///             app_queue.next(),
    ///         ).await {
    ///             Either::Left((res, _)) => match res? {},
    ///             Either::Right((Some(event), _)) => {
    ///                 data.handle(event);
    ///             }
    ///             Either::Right((None, _)) => return Ok(()),
    ///         }
    ///     }
    /// }
    /// ```
    pub fn poll_dispatch_pending(
        &mut self,
        cx: &mut task::Context,
        data: &mut State,
    ) -> task::Poll<Result<Infallible, DispatchError>> {
        loop {
            if let Err(e) = self.conn.backend.dispatch_inner_queue() {
                return task::Poll::Ready(Err(e.into()));
            }
            let mut lock = self.handle.inner.lock().unwrap();
            if lock.freeze_count != 0 {
                lock.waker = Some(cx.waker().clone());
                return task::Poll::Pending;
            }
            let QueueEvent(cb, msg, odata) = if let Some(elt) = lock.queue.pop_front() {
                elt
            } else {
                lock.waker = Some(cx.waker().clone());
                return task::Poll::Pending;
            };
            drop(lock);
            cb(&self.conn, msg, data, odata, &self.handle)?
        }
    }
}

struct DispatchWaker {
    cond: Condvar,
}

impl task::Wake for DispatchWaker {
    fn wake(self: Arc<Self>) {
        self.cond.notify_all()
    }
}

/// A handle representing an [`EventQueue`], used to assign objects upon creation.
pub struct QueueHandle<State> {
    pub(crate) inner: Arc<Mutex<EventQueueInner<State>>>,
}

/// A handle that temporarily pauses event processing on an [`EventQueue`].
#[derive(Debug)]
pub struct QueueFreezeGuard<'a, State> {
    qh: &'a QueueHandle<State>,
}

impl<State> std::fmt::Debug for QueueHandle<State> {
    #[cfg_attr(coverage, coverage(off))]
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("QueueHandle").field("inner", &Arc::as_ptr(&self.inner)).finish()
    }
}

impl<State> Clone for QueueHandle<State> {
    fn clone(&self) -> Self {
        Self { inner: self.inner.clone() }
    }
}

impl<State: 'static> QueueHandle<State> {
    /// Create an object data associated with this event queue
    ///
    /// This creates an implementation of [`ObjectData`] fitting for direct use with `wayland-backend` APIs
    /// that forwards all events to the event queue associated with this token, integrating the object into
    /// the [`Dispatch`]-based logic of `wayland-client`.
    pub fn make_data<I: Proxy + 'static, U: Send + Sync + 'static>(
        &self,
        user_data: U,
    ) -> Arc<dyn ObjectData>
    where
        State: Dispatch<I, U, State>,
    {
        Arc::new(QueueProxyData::<I, U, State> {
            handle: self.clone(),
            udata: user_data,
            _phantom: PhantomData,
        })
    }

    /// Temporarily block processing on this queue.
    ///
    /// This will cause the associated queue to block (or return `NotReady` to poll) until all
    /// [`QueueFreezeGuard`]s associated with the queue are dropped.
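    ///
    /// A minimal sketch of pausing dispatching while doing work that must not be interleaved with
    /// event handling (the surrounding setup is assumed):
    ///
    /// ```rust,no_run
    /// # use wayland_client::Connection;
    /// # let connection = Connection::connect_to_env().unwrap();
    /// # let event_queue = connection.new_event_queue::<()>();
    /// let qh = event_queue.handle();
    ///
    /// // While this guard is alive, the queue will not dispatch any event.
    /// let guard = qh.freeze();
    /// /* update shared data here */
    /// drop(guard); // dispatching resumes once all guards are dropped
    /// ```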
    pub fn freeze(&self) -> QueueFreezeGuard<State> {
        self.inner.lock().unwrap().freeze_count += 1;
        QueueFreezeGuard { qh: self }
    }
}

impl<'a, State> Drop for QueueFreezeGuard<'a, State> {
    fn drop(&mut self) {
        let mut lock = self.qh.inner.lock().unwrap();
        lock.freeze_count -= 1;
        if lock.freeze_count == 0 && !lock.queue.is_empty() {
            if let Some(waker) = lock.waker.take() {
                waker.wake();
            }
        }
    }
}

fn queue_callback<
    I: Proxy + 'static,
    U: Send + Sync + 'static,
    State: Dispatch<I, U, State> + 'static,
>(
    handle: &Connection,
    msg: Message<ObjectId, OwnedFd>,
    data: &mut State,
    odata: Arc<dyn ObjectData>,
    qhandle: &QueueHandle<State>,
) -> Result<(), DispatchError> {
    let (proxy, event) = I::parse_event(handle, msg)?;
    let udata: &U = odata.data_as_any().downcast_ref().expect("Wrong user_data value for object");
    <State as Dispatch<I, U, State>>::event(data, &proxy, event, udata, handle, qhandle);
    Ok(())
}

/// The [`ObjectData`] implementation used by Wayland proxies, integrating with [`Dispatch`]
pub struct QueueProxyData<I: Proxy, U, State> {
    handle: QueueHandle<State>,
    /// The user data associated with this object
    pub udata: U,
    _phantom: PhantomData<fn(&I)>,
}

impl<I: Proxy + 'static, U: Send + Sync + 'static, State> ObjectData for QueueProxyData<I, U, State>
where
    State: Dispatch<I, U, State> + 'static,
{
    fn event(
        self: Arc<Self>,
        _: &Backend,
        msg: Message<ObjectId, OwnedFd>,
    ) -> Option<Arc<dyn ObjectData>> {
        let new_data = msg
            .args
            .iter()
            .any(|arg| matches!(arg, Argument::NewId(id) if !id.is_null()))
            .then(|| State::event_created_child(msg.opcode, &self.handle));

        self.handle.inner.lock().unwrap().enqueue_event::<I, U>(msg, self.clone());

        new_data
    }

    fn destroyed(&self, _: ObjectId) {}

    fn data_as_any(&self) -> &dyn Any {
        &self.udata
    }
}

impl<I: Proxy, U: std::fmt::Debug, State> std::fmt::Debug for QueueProxyData<I, U, State> {
    #[cfg_attr(coverage, coverage(off))]
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("QueueProxyData").field("udata", &self.udata).finish()
    }
}

struct TemporaryData;

impl ObjectData for TemporaryData {
    fn event(
        self: Arc<Self>,
        _: &Backend,
        _: Message<ObjectId, OwnedFd>,
    ) -> Option<Arc<dyn ObjectData>> {
        unreachable!()
    }

    fn destroyed(&self, _: ObjectId) {}
}

/*
 * Dispatch delegation helpers
 */

/// A helper macro which delegates a set of [`Dispatch`] implementations for proxies to some other type which
/// provides a generic [`Dispatch`] implementation.
///
/// This macro allows more easily delegating smaller parts of the protocol an application may wish to handle
/// in a modular fashion.
///
/// # Usage
///
/// For example, say you want to delegate events for [`WlRegistry`](crate::protocol::wl_registry::WlRegistry)
/// to the struct `DelegateToMe` from the [`Dispatch`] documentation example.
///
/// ```
/// use wayland_client::{delegate_dispatch, protocol::wl_registry};
/// #
/// # use wayland_client::Dispatch;
/// #
/// # struct DelegateToMe;
/// # struct MyUserData;
/// #
/// # impl<State> Dispatch<wl_registry::WlRegistry, MyUserData, State> for DelegateToMe
/// # where
/// #     State: Dispatch<wl_registry::WlRegistry, MyUserData> + AsMut<DelegateToMe>,
/// # {
/// #     fn event(
/// #         _state: &mut State,
/// #         _proxy: &wl_registry::WlRegistry,
/// #         _event: wl_registry::Event,
/// #         _udata: &MyUserData,
/// #         _conn: &wayland_client::Connection,
/// #         _qhandle: &wayland_client::QueueHandle<State>,
/// #     ) {
/// #     }
/// # }
///
/// // ExampleApp is the type events will be dispatched to.
///
/// /// The application state
/// struct ExampleApp {
///     /// The delegate for handling wl_registry events.
///     delegate: DelegateToMe,
/// }
///
/// // Use delegate_dispatch to implement Dispatch<wl_registry::WlRegistry, MyUserData> for ExampleApp
/// delegate_dispatch!(ExampleApp: [wl_registry::WlRegistry: MyUserData] => DelegateToMe);
///
/// // DelegateToMe requires that ExampleApp implements AsMut<DelegateToMe>, so we provide the
/// // trait implementation.
/// impl AsMut<DelegateToMe> for ExampleApp {
///     fn as_mut(&mut self) -> &mut DelegateToMe {
///         &mut self.delegate
///     }
/// }
///
/// // To explain the macro above, you may read it as the following:
/// //
/// // For ExampleApp, delegate WlRegistry to DelegateToMe.
///
/// // Assert ExampleApp can Dispatch events for wl_registry
/// fn assert_is_registry_delegate<T>()
/// where
///     T: Dispatch<wl_registry::WlRegistry, MyUserData>,
/// {
/// }
///
/// assert_is_registry_delegate::<ExampleApp>();
/// ```
#[macro_export]
macro_rules! delegate_dispatch {
    ($(@< $( $lt:tt $( : $clt:tt $(+ $dlt:tt )* )? ),+ >)? $dispatch_from:ty : [$interface: ty: $udata: ty] => $dispatch_to: ty) => {
        impl$(< $( $lt $( : $clt $(+ $dlt )* )? ),+ >)? $crate::Dispatch<$interface, $udata> for $dispatch_from {
            fn event(
                state: &mut Self,
                proxy: &$interface,
                event: <$interface as $crate::Proxy>::Event,
                data: &$udata,
                conn: &$crate::Connection,
                qhandle: &$crate::QueueHandle<Self>,
            ) {
                <$dispatch_to as $crate::Dispatch<$interface, $udata, Self>>::event(state, proxy, event, data, conn, qhandle)
            }

            fn event_created_child(
                opcode: u16,
                qhandle: &$crate::QueueHandle<Self>
            ) -> ::std::sync::Arc<dyn $crate::backend::ObjectData> {
                <$dispatch_to as $crate::Dispatch<$interface, $udata, Self>>::event_created_child(opcode, qhandle)
            }
        }
    };
}

/// A helper macro which delegates a set of [`Dispatch`] implementations for proxies to a static handler.
///
/// # Usage
///
/// This macro is useful to implement [`Dispatch`] for interfaces where events are unimportant to
/// the current application and can be ignored.
///
/// # Example
///
/// ```
/// use wayland_client::{delegate_noop, protocol::{wl_data_offer, wl_subcompositor}};
///
/// /// The application state
/// struct ExampleApp {
///     // ...
/// }
///
/// // Ignore all events for this interface:
/// delegate_noop!(ExampleApp: ignore wl_data_offer::WlDataOffer);
///
/// // This interface should not emit events:
/// delegate_noop!(ExampleApp: wl_subcompositor::WlSubcompositor);
/// ```
///
/// This last example will execute `unreachable!()` if the interface emits any events.
#[macro_export]
macro_rules! delegate_noop {
    ($(@< $( $lt:tt $( : $clt:tt $(+ $dlt:tt )* )? ),+ >)? $dispatch_from: ty : $interface: ty) => {
        impl$(< $( $lt $( : $clt $(+ $dlt )* )? ),+ >)? $crate::Dispatch<$interface, ()> for $dispatch_from {
            fn event(
                _: &mut Self,
                _: &$interface,
                _: <$interface as $crate::Proxy>::Event,
                _: &(),
                _: &$crate::Connection,
                _: &$crate::QueueHandle<Self>,
            ) {
                unreachable!();
            }
        }
    };

    ($(@< $( $lt:tt $( : $clt:tt $(+ $dlt:tt )* )? ),+ >)? $dispatch_from: ty : ignore $interface: ty) => {
        impl$(< $( $lt $( : $clt $(+ $dlt )* )? ),+ >)? $crate::Dispatch<$interface, ()> for $dispatch_from {
            fn event(
                _: &mut Self,
                _: &$interface,
                _: <$interface as $crate::Proxy>::Event,
                _: &(),
                _: &$crate::Connection,
                _: &$crate::QueueHandle<Self>,
            ) {
            }
        }
    };
}