1 | //! A pool implementation based on buffer slots |
2 | |
3 | use std::io; |
4 | use std::{ |
5 | os::unix::io::{AsRawFd, OwnedFd}, |
6 | sync::{ |
7 | atomic::{AtomicU8, AtomicUsize, Ordering}, |
8 | Arc, Mutex, Weak, |
9 | }, |
10 | }; |
11 | |
12 | use wayland_client::{ |
13 | protocol::{wl_buffer, wl_shm, wl_surface}, |
14 | Proxy, |
15 | }; |
16 | |
17 | use crate::{globals::ProvidesBoundGlobal, shm::raw::RawPool, shm::CreatePoolError}; |
18 | |
/// An error that may occur when creating a buffer.
#[derive(Debug, thiserror::Error)]
20 | pub enum CreateBufferError { |
21 | /// Slot creation error. |
22 | #[error(transparent)] |
23 | Io(#[from] io::Error), |
24 | |
25 | /// Pool mismatch. |
    #[error("Incorrect pool for slot")]
27 | PoolMismatch, |
28 | |
    /// Slot size mismatch.
    #[error("Requested buffer size is too large for slot")]
31 | SlotTooSmall, |
32 | } |
33 | |
/// An error that may occur when activating a slot.
#[derive(Debug, thiserror::Error)]
35 | pub enum ActivateSlotError { |
36 | /// Buffer was already active |
    #[error("Buffer was already active")]
38 | AlreadyActive, |
39 | } |
40 | |
/// A pool of shared memory that is divided into [Slot]s, each of which can back one or more
/// [Buffer]s.
#[derive(Debug)]
42 | pub struct SlotPool { |
43 | pub(crate) inner: RawPool, |
44 | free_list: Arc<Mutex<Vec<FreelistEntry>>>, |
45 | } |
46 | |
#[derive(Debug)]
48 | struct FreelistEntry { |
49 | offset: usize, |
50 | len: usize, |
51 | } |
52 | |
53 | /// A chunk of memory allocated from a [SlotPool] |
54 | /// |
55 | /// Retaining this object is only required if you wish to resize or change the buffer's format |
56 | /// without changing the contents of the backing memory. |
#[derive(Debug)]
58 | pub struct Slot { |
59 | inner: Arc<SlotInner>, |
60 | } |
61 | |
#[derive(Debug)]
63 | struct SlotInner { |
64 | free_list: Weak<Mutex<Vec<FreelistEntry>>>, |
65 | offset: usize, |
66 | len: usize, |
67 | active_buffers: AtomicUsize, |
68 | /// Count of all "real" references to this slot. This includes all Slot objects and any |
69 | /// BufferData object that is not in the DEAD state. When this reaches zero, the memory for |
70 | /// this slot will return to the free_list. It is not possible for it to reach zero and have a |
71 | /// Slot or Buffer referring to it. |
72 | all_refs: AtomicUsize, |
73 | } |
74 | |
75 | /// A wrapper around a [`wl_buffer::WlBuffer`] which has been allocated via a [SlotPool]. |
76 | /// |
77 | /// When this object is dropped, the buffer will be destroyed immediately if it is not active, or |
78 | /// upon the server's release if it is. |
#[derive(Debug)]
80 | pub struct Buffer { |
81 | buffer: wl_buffer::WlBuffer, |
82 | height: i32, |
83 | stride: i32, |
84 | slot: Slot, |
85 | } |
86 | |
87 | /// ObjectData for the WlBuffer |
#[derive(Debug)]
89 | struct BufferData { |
90 | inner: Arc<SlotInner>, |
91 | state: AtomicU8, |
92 | } |
93 | |
94 | // These constants define the value of BufferData::state, since AtomicEnum does not exist. |
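//
// State transitions driven by the bit operations below (a Release ORs in RELEASE_SET, dropping
// the Buffer ORs in DESTROY_SET, and activation clears RELEASE_SET):
//
//   INACTIVE           --activate--> ACTIVE
//   ACTIVE             --Release---> INACTIVE
//   ACTIVE             --Destroy---> DESTROY_ON_RELEASE
//   INACTIVE           --Destroy---> DEAD
//   DESTROY_ON_RELEASE --Release---> DEAD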
95 | impl BufferData { |
96 | /// Buffer is counted in active_buffers list; will return to INACTIVE on Release. |
97 | const ACTIVE: u8 = 0; |
98 | |
99 | /// Buffer is not counted in active_buffers list, but also has not been destroyed. |
100 | const INACTIVE: u8 = 1; |
101 | |
102 | /// Buffer is counted in active_buffers list; will move to DEAD on Release |
103 | const DESTROY_ON_RELEASE: u8 = 2; |
104 | |
105 | /// Buffer has been destroyed |
106 | const DEAD: u8 = 3; |
107 | |
108 | /// Value that is ORed on buffer release to transition to the next state |
109 | const RELEASE_SET: u8 = 1; |
110 | |
111 | /// Value that is ORed on buffer destroy to transition to the next state |
112 | const DESTROY_SET: u8 = 2; |
113 | |
114 | /// Call after successfully transitioning the state to DEAD |
115 | fn record_death(&self) { |
116 | drop(Slot { inner: self.inner.clone() }) |
117 | } |
118 | } |
119 | |
120 | impl SlotPool { |
    /// Create a new pool backed by `len` bytes of shared memory.
    pub fn new(
122 | len: usize, |
123 | shm: &impl ProvidesBoundGlobal<wl_shm::WlShm, 1>, |
124 | ) -> Result<Self, CreatePoolError> { |
125 | let inner = RawPool::new(len, shm)?; |
126 | let free_list = Arc::new(Mutex::new(vec![FreelistEntry { offset: 0, len: inner.len() }])); |
127 | Ok(SlotPool { inner, free_list }) |
128 | } |
129 | |
130 | /// Create a new buffer in a new slot. |
131 | /// |
132 | /// This returns the buffer and the canvas. The parameters are: |
133 | /// |
134 | /// - `width`: the width of this buffer (in pixels) |
135 | /// - `height`: the height of this buffer (in pixels) |
136 | /// - `stride`: distance (in bytes) between the beginning of a row and the next one |
137 | /// - `format`: the encoding format of the pixels. Using a format that was not |
138 | /// advertised to the `wl_shm` global by the server is a protocol error and will |
139 | /// terminate your connection. |
140 | /// |
141 | /// The [Slot] for this buffer will have exactly the size required for the data. It can be |
142 | /// accessed via [Buffer::slot] to create additional buffers that point to the same data. This |
143 | /// is required if you wish to change formats, buffer dimensions, or attach a canvas to |
144 | /// multiple surfaces. |
145 | /// |
146 | /// For more control over sizing, use [Self::new_slot] and [Self::create_buffer_in]. |
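    ///
    /// A sketch of typical usage; it assumes `pool` is a [SlotPool] and `surface` is an existing
    /// `wl_surface` created elsewhere:
    ///
    /// ```ignore
    /// use wayland_client::protocol::wl_shm;
    ///
    /// let (width, height) = (256, 256);
    /// let stride = width * 4; // Argb8888 uses 4 bytes per pixel
    /// let (buffer, canvas) = pool
    ///     .create_buffer(width, height, stride, wl_shm::Format::Argb8888)
    ///     .expect("create buffer");
    ///
    /// // Fill with opaque black; wl_shm formats are little-endian, so the bytes are B, G, R, A.
    /// for pixel in canvas.chunks_exact_mut(4) {
    ///     pixel.copy_from_slice(&[0, 0, 0, 0xFF]);
    /// }
    ///
    /// buffer.attach_to(&surface).expect("buffer is not active");
    /// surface.damage_buffer(0, 0, width, height);
    /// surface.commit();
    /// ```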
147 | pub fn create_buffer( |
148 | &mut self, |
149 | width: i32, |
150 | height: i32, |
151 | stride: i32, |
152 | format: wl_shm::Format, |
153 | ) -> Result<(Buffer, &mut [u8]), CreateBufferError> { |
154 | let len = (height as usize) * (stride as usize); |
155 | let slot = self.new_slot(len)?; |
156 | let buffer = self.create_buffer_in(&slot, width, height, stride, format)?; |
157 | let canvas = self.raw_data_mut(&slot); |
158 | Ok((buffer, canvas)) |
159 | } |
160 | |
161 | /// Get the bytes corresponding to a given slot or buffer if drawing to the slot is permitted. |
162 | /// |
163 | /// Returns `None` if there are active buffers in the slot or if the slot does not correspond |
164 | /// to this pool. |
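    ///
    /// A sketch of the intended check-before-draw pattern, assuming `buffer` was created from
    /// this pool:
    ///
    /// ```ignore
    /// if let Some(canvas) = pool.canvas(&buffer) {
    ///     // No buffer on this slot is currently held by the server, so drawing is safe.
    ///     canvas.fill(0);
    /// }
    /// ```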
165 | pub fn canvas(&mut self, key: &impl CanvasKey) -> Option<&mut [u8]> { |
166 | key.canvas(self) |
167 | } |
168 | |
169 | /// Returns the size, in bytes, of this pool. |
    #[allow(clippy::len_without_is_empty)]
171 | pub fn len(&self) -> usize { |
172 | self.inner.len() |
173 | } |
174 | |
175 | /// Resizes the memory pool, notifying the server the pool has changed in size. |
176 | /// |
177 | /// This is an optimization; the pool automatically resizes when you allocate new slots. |
178 | pub fn resize(&mut self, size: usize) -> io::Result<()> { |
179 | let old_len = self.inner.len(); |
180 | self.inner.resize(size)?; |
181 | let new_len = self.inner.len(); |
182 | if old_len == new_len { |
183 | return Ok(()); |
184 | } |
185 | // add the new memory to the freelist |
186 | let mut free = self.free_list.lock().unwrap(); |
187 | if let Some(FreelistEntry { offset, len }) = free.last_mut() { |
188 | if *offset + *len == old_len { |
189 | *len += new_len - old_len; |
190 | return Ok(()); |
191 | } |
192 | } |
193 | free.push(FreelistEntry { offset: old_len, len: new_len - old_len }); |
194 | Ok(()) |
195 | } |
196 | |
197 | fn alloc(&mut self, size: usize) -> io::Result<usize> { |
198 | let mut free = self.free_list.lock().unwrap(); |
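        // First fit: carve the allocation out of the first free entry that is large enough.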
199 | for FreelistEntry { offset, len } in free.iter_mut() { |
200 | if *len >= size { |
201 | let rv = *offset; |
202 | *len -= size; |
203 | *offset += size; |
204 | return Ok(rv); |
205 | } |
206 | } |
207 | let mut rv = self.inner.len(); |
208 | let mut pop_tail = false; |
209 | if let Some(FreelistEntry { offset, len }) = free.last() { |
210 | if offset + len == self.inner.len() { |
211 | rv -= len; |
212 | pop_tail = true; |
213 | } |
214 | } |
215 | // resize like Vec::reserve, always at least doubling |
216 | let target = std::cmp::max(rv + size, self.inner.len() * 2); |
217 | self.inner.resize(target)?; |
218 | // adjust the end of the freelist here |
219 | if pop_tail { |
220 | free.pop(); |
221 | } |
222 | if target > rv + size { |
223 | free.push(FreelistEntry { offset: rv + size, len: target - rv - size }); |
224 | } |
225 | Ok(rv) |
226 | } |
227 | |
228 | fn free(free_list: &Mutex<Vec<FreelistEntry>>, mut offset: usize, mut len: usize) { |
229 | let mut free = free_list.lock().unwrap(); |
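        // Rebuild the freelist, merging the returned range with any directly adjacent entries so
        // neighboring free regions coalesce into one.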
230 | let mut nf = Vec::with_capacity(free.len() + 1); |
231 | for &FreelistEntry { offset: ioff, len: ilen } in free.iter() { |
232 | if ioff + ilen == offset { |
233 | offset = ioff; |
234 | len += ilen; |
235 | continue; |
236 | } |
237 | if ioff == offset + len { |
238 | len += ilen; |
239 | continue; |
240 | } |
241 | if ioff > offset + len && len != 0 { |
242 | nf.push(FreelistEntry { offset, len }); |
243 | len = 0; |
244 | } |
245 | if ilen != 0 { |
246 | nf.push(FreelistEntry { offset: ioff, len: ilen }); |
247 | } |
248 | } |
249 | if len != 0 { |
250 | nf.push(FreelistEntry { offset, len }); |
251 | } |
252 | *free = nf; |
253 | } |
254 | |
255 | /// Create a new slot with the given size in bytes. |
256 | pub fn new_slot(&mut self, mut len: usize) -> io::Result<Slot> { |
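        // Round the requested length up to a multiple of 64 bytes.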
257 | len = (len + 63) & !63; |
258 | let offset = self.alloc(len)?; |
259 | |
260 | Ok(Slot { |
261 | inner: Arc::new(SlotInner { |
262 | free_list: Arc::downgrade(&self.free_list), |
263 | offset, |
264 | len, |
265 | active_buffers: AtomicUsize::new(0), |
266 | all_refs: AtomicUsize::new(1), |
267 | }), |
268 | }) |
269 | } |
270 | |
271 | /// Get the bytes corresponding to a given slot. |
272 | /// |
273 | /// Note: prefer using [Self::canvas], which will prevent drawing to a buffer that has not been |
274 | /// released by the server. |
275 | /// |
    /// Returns an empty slice if the slot does not belong to this pool.
277 | pub fn raw_data_mut(&mut self, slot: &Slot) -> &mut [u8] { |
278 | if slot.inner.free_list.as_ptr() == Arc::as_ptr(&self.free_list) { |
279 | &mut self.inner.mmap()[slot.inner.offset..][..slot.inner.len] |
280 | } else { |
281 | &mut [] |
282 | } |
283 | } |
284 | |
285 | /// Create a new buffer corresponding to a slot. |
286 | /// |
287 | /// The parameters are: |
288 | /// |
289 | /// - `width`: the width of this buffer (in pixels) |
290 | /// - `height`: the height of this buffer (in pixels) |
291 | /// - `stride`: distance (in bytes) between the beginning of a row and the next one |
292 | /// - `format`: the encoding format of the pixels. Using a format that was not |
293 | /// advertised to the `wl_shm` global by the server is a protocol error and will |
294 | /// terminate your connection |
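    ///
    /// A sketch showing one slot backing two buffers; it assumes `pool` is this [SlotPool], and
    /// the formats are only examples of what a server may advertise:
    ///
    /// ```ignore
    /// use wayland_client::protocol::wl_shm;
    ///
    /// let (width, height) = (64, 64);
    /// let stride = width * 4;
    /// let slot = pool.new_slot((height * stride) as usize).expect("allocate slot");
    ///
    /// // Both buffers are backed by the same bytes; only the declared format differs.
    /// let argb = pool
    ///     .create_buffer_in(&slot, width, height, stride, wl_shm::Format::Argb8888)
    ///     .expect("create buffer");
    /// let xrgb = pool
    ///     .create_buffer_in(&slot, width, height, stride, wl_shm::Format::Xrgb8888)
    ///     .expect("create buffer");
    ///
    /// // Draw through the slot (or either buffer) while no buffer is active.
    /// if let Some(canvas) = pool.canvas(&slot) {
    ///     canvas.fill(0xFF);
    /// }
    /// ```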
295 | pub fn create_buffer_in( |
296 | &mut self, |
297 | slot: &Slot, |
298 | width: i32, |
299 | height: i32, |
300 | stride: i32, |
301 | format: wl_shm::Format, |
302 | ) -> Result<Buffer, CreateBufferError> { |
303 | let offset = slot.inner.offset as i32; |
304 | let len = (height as usize) * (stride as usize); |
305 | if len > slot.inner.len { |
306 | return Err(CreateBufferError::SlotTooSmall); |
307 | } |
308 | |
309 | if slot.inner.free_list.as_ptr() != Arc::as_ptr(&self.free_list) { |
310 | return Err(CreateBufferError::PoolMismatch); |
311 | } |
312 | |
313 | let slot = slot.clone(); |
314 | // take a ref for the BufferData, which will be destroyed by BufferData::record_death |
315 | slot.inner.all_refs.fetch_add(1, Ordering::Relaxed); |
316 | let data = Arc::new(BufferData { |
317 | inner: slot.inner.clone(), |
318 | state: AtomicU8::new(BufferData::INACTIVE), |
319 | }); |
320 | let buffer = self.inner.create_buffer_raw(offset, width, height, stride, format, data); |
321 | Ok(Buffer { buffer, height, stride, slot }) |
322 | } |
323 | } |
324 | |
325 | impl Clone for Slot { |
326 | fn clone(&self) -> Self { |
        let inner = self.inner.clone();
        inner.all_refs.fetch_add(1, Ordering::Relaxed);
329 | Slot { inner } |
330 | } |
331 | } |
332 | |
333 | impl Drop for Slot { |
334 | fn drop(&mut self) { |
        if self.inner.all_refs.fetch_sub(1, Ordering::Relaxed) == 1 {
            if let Some(free_list) = self.inner.free_list.upgrade() {
337 | SlotPool::free(&free_list, self.inner.offset, self.inner.len); |
338 | } |
339 | } |
340 | } |
341 | } |
342 | |
343 | impl Drop for SlotInner { |
344 | fn drop(&mut self) { |
345 | debug_assert_eq!(*self.all_refs.get_mut(), 0); |
346 | } |
347 | } |
348 | |
349 | /// A helper trait for [SlotPool::canvas]. |
pub trait CanvasKey {
    /// Get the canvas corresponding to this key, if drawing to it is currently permitted.
    fn canvas<'pool>(&self, pool: &'pool mut SlotPool) -> Option<&'pool mut [u8]>;
352 | } |
353 | |
354 | impl Slot { |
355 | /// Return true if there are buffers referencing this slot whose contents are being accessed |
356 | /// by the server. |
357 | pub fn has_active_buffers(&self) -> bool { |
358 | self.inner.active_buffers.load(Ordering::Relaxed) != 0 |
359 | } |
360 | |
361 | /// Returns the size, in bytes, of this slot. |
    #[allow(clippy::len_without_is_empty)]
363 | pub fn len(&self) -> usize { |
364 | self.inner.len |
365 | } |
366 | |
367 | /// Get the bytes corresponding to a given slot if drawing to the slot is permitted. |
368 | /// |
369 | /// Returns `None` if there are active buffers in the slot or if the slot does not correspond |
370 | /// to this pool. |
371 | pub fn canvas<'pool>(&self, pool: &'pool mut SlotPool) -> Option<&'pool mut [u8]> { |
372 | if self.has_active_buffers() { |
373 | return None; |
374 | } |
375 | if self.inner.free_list.as_ptr() == Arc::as_ptr(&pool.free_list) { |
376 | Some(&mut pool.inner.mmap()[self.inner.offset..][..self.inner.len]) |
377 | } else { |
378 | None |
379 | } |
380 | } |
381 | } |
382 | |
383 | impl CanvasKey for Slot { |
384 | fn canvas<'pool>(&self, pool: &'pool mut SlotPool) -> Option<&'pool mut [u8]> { |
385 | self.canvas(pool) |
386 | } |
387 | } |
388 | |
389 | impl Buffer { |
390 | /// Attach a buffer to a surface. |
391 | /// |
392 | /// This marks the slot as active until the server releases the buffer, which will happen |
393 | /// automatically assuming the surface is committed without attaching a different buffer. |
394 | /// |
395 | /// Note: if you need to ensure that [`canvas()`](Buffer::canvas) calls never return data that |
396 | /// could be attached to a surface in a multi-threaded client, make this call while you have |
397 | /// exclusive access to the corresponding [`SlotPool`]. |
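    ///
    /// A sketch, assuming `surface` is the target `wl_surface` and `width`/`height` match this
    /// buffer:
    ///
    /// ```ignore
    /// buffer.attach_to(&surface).expect("buffer is not already active");
    /// surface.damage_buffer(0, 0, width, height);
    /// surface.commit();
    /// ```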
398 | pub fn attach_to(&self, surface: &wl_surface::WlSurface) -> Result<(), ActivateSlotError> { |
399 | self.activate()?; |
400 | surface.attach(Some(&self.buffer), 0, 0); |
401 | Ok(()) |
402 | } |
403 | |
404 | /// Get the inner buffer. |
405 | pub fn wl_buffer(&self) -> &wl_buffer::WlBuffer { |
406 | &self.buffer |
407 | } |
408 | |
    /// The height of this buffer, in pixels.
    pub fn height(&self) -> i32 {
410 | self.height |
411 | } |
412 | |
    /// The stride of this buffer, in bytes per row.
    pub fn stride(&self) -> i32 {
414 | self.stride |
415 | } |
416 | |
417 | fn data(&self) -> Option<&BufferData> { |
418 | self.buffer.object_data()?.downcast_ref() |
419 | } |
420 | |
421 | /// Get the bytes corresponding to this buffer if drawing is permitted. |
422 | /// |
423 | /// This may be smaller than the canvas associated with the slot. |
424 | pub fn canvas<'pool>(&self, pool: &'pool mut SlotPool) -> Option<&'pool mut [u8]> { |
425 | let len = (self.height as usize) * (self.stride as usize); |
426 | if self.slot.inner.active_buffers.load(Ordering::Relaxed) != 0 { |
427 | return None; |
428 | } |
429 | if self.slot.inner.free_list.as_ptr() == Arc::as_ptr(&pool.free_list) { |
430 | Some(&mut pool.inner.mmap()[self.slot.inner.offset..][..len]) |
431 | } else { |
432 | None |
433 | } |
434 | } |
435 | |
436 | /// Get the slot corresponding to this buffer. |
437 | pub fn slot(&self) -> Slot { |
438 | self.slot.clone() |
439 | } |
440 | |
441 | /// Manually mark a buffer as active. |
442 | /// |
443 | /// An active buffer prevents drawing on its slot until a Release event is received or until |
444 | /// manually deactivated. |
445 | pub fn activate(&self) -> Result<(), ActivateSlotError> { |
        let data = self.data().expect("UserData type mismatch");
447 | |
448 | // This bitwise AND will transition INACTIVE -> ACTIVE, or do nothing if the buffer was |
449 | // already ACTIVE. No other ordering is required, as the server will not send a Release |
450 | // until we send our attach after returning Ok. |
451 | match data.state.fetch_and(!BufferData::RELEASE_SET, Ordering::Relaxed) { |
452 | BufferData::INACTIVE => { |
453 | data.inner.active_buffers.fetch_add(1, Ordering::Relaxed); |
454 | Ok(()) |
455 | } |
456 | BufferData::ACTIVE => Err(ActivateSlotError::AlreadyActive), |
            _ => unreachable!("Invalid state in BufferData"),
458 | } |
459 | } |
460 | |
461 | /// Manually mark a buffer as inactive. |
462 | /// |
463 | /// This should be used when the buffer was manually marked as active or when a buffer was |
464 | /// attached to a surface but not committed. Calling this function on a buffer that was |
465 | /// committed to a surface risks making the surface contents undefined. |
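    ///
    /// A sketch of the intended use, assuming the buffer was attached to `surface` but the
    /// commit never happened:
    ///
    /// ```ignore
    /// // Undo the pending attach and release our claim on the slot so it can be drawn to again.
    /// surface.attach(None, 0, 0);
    /// buffer.deactivate().expect("buffer was active");
    /// ```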
466 | pub fn deactivate(&self) -> Result<(), ActivateSlotError> { |
        let data = self.data().expect("UserData type mismatch");
468 | |
469 | // Same operation as the Release event, but we know the Buffer was not dropped. |
470 | match data.state.fetch_or(BufferData::RELEASE_SET, Ordering::Relaxed) { |
471 | BufferData::ACTIVE => { |
472 | data.inner.active_buffers.fetch_sub(1, Ordering::Relaxed); |
473 | Ok(()) |
474 | } |
475 | BufferData::INACTIVE => Err(ActivateSlotError::AlreadyActive), |
            _ => unreachable!("Invalid state in BufferData"),
477 | } |
478 | } |
479 | } |
480 | |
481 | impl CanvasKey for Buffer { |
482 | fn canvas<'pool>(&self, pool: &'pool mut SlotPool) -> Option<&'pool mut [u8]> { |
483 | self.canvas(pool) |
484 | } |
485 | } |
486 | |
487 | impl Drop for Buffer { |
488 | fn drop(&mut self) { |
        if let Some(data) = self.data() {
            match data.state.fetch_or(BufferData::DESTROY_SET, Ordering::Relaxed) {
491 | BufferData::ACTIVE => { |
492 | // server is using the buffer, let ObjectData handle the destroy |
493 | } |
494 | BufferData::INACTIVE => { |
495 | data.record_death(); |
496 | self.buffer.destroy(); |
497 | } |
                _ => unreachable!("Invalid state in BufferData"),
499 | } |
500 | } |
501 | } |
502 | } |
503 | |
504 | impl wayland_client::backend::ObjectData for BufferData { |
505 | fn event( |
506 | self: Arc<Self>, |
507 | handle: &wayland_client::backend::Backend, |
508 | msg: wayland_backend::protocol::Message<wayland_backend::client::ObjectId, OwnedFd>, |
509 | ) -> Option<Arc<dyn wayland_backend::client::ObjectData>> { |
510 | debug_assert!(wayland_client::backend::protocol::same_interface( |
511 | msg.sender_id.interface(), |
512 | wl_buffer::WlBuffer::interface() |
513 | )); |
514 | debug_assert!(msg.opcode == 0); |
515 | |
516 | match self.state.fetch_or(BufferData::RELEASE_SET, Ordering::Relaxed) { |
517 | BufferData::ACTIVE => { |
518 | self.inner.active_buffers.fetch_sub(1, Ordering::Relaxed); |
519 | } |
520 | BufferData::INACTIVE => { |
521 | // possible spurious release, or someone called deactivate incorrectly |
                log::debug!("Unexpected WlBuffer::Release on an inactive buffer");
523 | } |
524 | BufferData::DESTROY_ON_RELEASE => { |
525 | self.record_death(); |
526 | self.inner.active_buffers.fetch_sub(1, Ordering::Relaxed); |
527 | |
                // The Destroy request is identical to the Release event (no arguments, and the
                // same object and opcode), so we can send the message we just received back as
                // the destroy request.
                handle
                    .send_request(msg.map_fd(|x| x.as_raw_fd()), None, None)
                    .expect("Unexpected invalid ID");
532 | } |
533 | BufferData::DEAD => { |
534 | // no-op, this object is already unusable |
535 | } |
            _ => unreachable!("Invalid state in BufferData"),
537 | } |
538 | |
539 | None |
540 | } |
541 | |
542 | fn destroyed(&self, _: wayland_backend::client::ObjectId) {} |
543 | } |
544 | |
545 | impl Drop for BufferData { |
546 | fn drop(&mut self) { |
        let state = *self.state.get_mut();
        if state == BufferData::ACTIVE || state == BufferData::DESTROY_ON_RELEASE {
            // Release the active-buffer count
            self.inner.active_buffers.fetch_sub(1, Ordering::Relaxed);
551 | } |
552 | |
553 | if state != BufferData::DEAD { |
554 | // nobody has ever transitioned state to DEAD, so we are responsible for freeing the |
555 | // extra reference |
556 | self.record_death(); |
557 | } |
558 | } |
559 | } |
560 | |