//! A pool implementation which automatically manages buffers.
//!
//! This pool is built on the [`RawPool`].
//!
//! The [`MultiPool`] takes a key which is used to identify buffers and tries to return the buffer associated with the key
//! if possible. If no buffer in the pool is associated with the key, it will create a new one.
//!
//! # Example
//!
//! ```rust
//! use smithay_client_toolkit::reexports::client::{
//!     QueueHandle,
//!     protocol::wl_surface::WlSurface,
//!     protocol::wl_shm::Format,
//! };
//! use smithay_client_toolkit::shm::multi::MultiPool;
//!
//! struct WlFoo {
//!     // The surface we'll draw on and the index of the buffer associated with it
//!     surface: (WlSurface, usize),
//!     pool: MultiPool<(WlSurface, usize)>
//! }
//!
//! impl WlFoo {
//!     fn draw(&mut self, qh: &QueueHandle<WlFoo>) {
//!         let surface = &self.surface.0;
//!         // We'll increment "i" until the pool can give us a buffer:
//!         // either there's no buffer associated with our surface and "i" yet,
//!         // or the buffer associated with our surface and "i" is free for use.
//!         //
//!         // There's no limit to the amount of buffers we can allocate to our surface but since
//!         // shm buffers are released fairly fast, it's unlikely we'll need more than double buffering.
//!         for i in 0..2 {
//!             self.surface.1 = i;
//!             if let Ok((offset, buffer, slice)) = self.pool.create_buffer(
//!                 100,
//!                 100 * 4,
//!                 100,
//!                 &self.surface,
//!                 Format::Argb8888,
//!             ) {
//!                 /*
//!                     insert drawing code here
//!                 */
//!                 surface.attach(Some(buffer), 0, 0);
//!                 surface.commit();
//!                 // We exit the function after the draw.
//!                 return;
//!             }
//!         }
//!         /*
//!             If there's no buffer available we can for example request a frame callback
//!             and trigger a redraw when it fires.
//!             (not shown in this example)
//!         */
//!     }
//! }
//!
//! fn draw(slice: &mut [u8]) {
//!     todo!()
//! }
//!
//! ```
//!

use std::borrow::Borrow;
use std::io;
use std::os::unix::io::OwnedFd;

use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};
use wayland_client::{
    protocol::{wl_buffer, wl_shm},
    Proxy,
};

use crate::globals::ProvidesBoundGlobal;

use super::raw::RawPool;
use super::CreatePoolError;

#[derive(Debug, thiserror::Error)]
pub enum PoolError {
    #[error("buffer is currently used")]
    InUse,
    #[error("buffer is overlapping another")]
    Overlap,
    #[error("buffer could not be found")]
    NotFound,
}

/// This pool manages buffers associated with keys.
/// Only one buffer can be associated with a given key.
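///
/// As a rough sketch (here `shm_state` is a hypothetical value implementing
/// [`ProvidesBoundGlobal`] for `wl_shm`), a pool keyed by surface could be created with:
///
/// ```ignore
/// use smithay_client_toolkit::shm::multi::MultiPool;
/// use smithay_client_toolkit::reexports::client::protocol::wl_surface::WlSurface;
///
/// let mut pool: MultiPool<WlSurface> = MultiPool::new(&shm_state)?;
/// ```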
#[derive(Debug)]
pub struct MultiPool<K> {
    buffer_list: Vec<BufferSlot<K>>,
    pub(crate) inner: RawPool,
}

#[derive(Debug)]
pub struct BufferSlot<K> {
    free: Arc<AtomicBool>,
    size: usize,
    used: usize,
    offset: usize,
    buffer: Option<wl_buffer::WlBuffer>,
    key: K,
}

impl<K> Drop for BufferSlot<K> {
    fn drop(&mut self) {
        self.destroy().ok();
    }
}

impl<K> BufferSlot<K> {
    /// Destroys this slot's `wl_buffer`.
    ///
    /// Returns [`PoolError::NotFound`] if the slot has no buffer and [`PoolError::InUse`]
    /// if the compositor has not released the buffer yet.
    pub fn destroy(&self) -> Result<(), PoolError> {
        self.buffer.as_ref().ok_or(PoolError::NotFound).and_then(|buffer| {
            self.free.load(Ordering::Relaxed).then(|| buffer.destroy()).ok_or(PoolError::InUse)
        })
    }
}

impl<K> MultiPool<K> {
    pub fn new(shm: &impl ProvidesBoundGlobal<wl_shm::WlShm, 1>) -> Result<Self, CreatePoolError> {
        Ok(Self { inner: RawPool::new(4096, shm)?, buffer_list: Vec::new() })
    }

    /// Resizes the memory pool, notifying the server the pool has changed in size.
    ///
    /// The wl_shm protocol only allows the pool to be made bigger. If the new size is smaller than the
    /// current size of the pool, this function will do nothing.
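    ///
    /// Note that [`Self::create_buffer`] already grows the pool on demand, so calling this
    /// directly is mostly useful to pre-allocate space. A minimal usage sketch, assuming
    /// `pool` is an existing [`MultiPool`]:
    ///
    /// ```ignore
    /// // Grow the pool to at least 1 MiB; a smaller value would be silently ignored.
    /// pool.resize(1024 * 1024)?;
    /// ```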
    pub fn resize(&mut self, size: usize) -> io::Result<()> {
        self.inner.resize(size)
    }

    /// Removes the buffer with the given key from the pool and rearranges the others.
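    ///
    /// Dropping the returned [`BufferSlot`] destroys its `wl_buffer` if the compositor has
    /// already released it. A minimal sketch, assuming `pool` is a [`MultiPool`] keyed by
    /// `WlSurface` and `surface` is a key that was previously used to create a buffer:
    ///
    /// ```ignore
    /// if let Some(slot) = pool.remove(&surface) {
    ///     // The slot's wl_buffer is destroyed when the slot is dropped (if it is free).
    ///     drop(slot);
    /// }
    /// ```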
    pub fn remove<Q>(&mut self, key: &Q) -> Option<BufferSlot<K>>
    where
        Q: PartialEq,
        K: std::borrow::Borrow<Q>,
    {
        self.buffer_list
            .iter()
            .enumerate()
            .find(|(_, slot)| slot.key.borrow().eq(key))
            .map(|(i, _)| i)
            .map(|i| self.buffer_list.remove(i))
    }

    /// Inserts a buffer into the pool.
    ///
    /// The parameters are:
    ///
    /// - `width`: the width of this buffer (in pixels)
    /// - `stride`: distance (in bytes) between the beginning of a row and the next one
    /// - `height`: the height of this buffer (in pixels)
    /// - `key`: a borrowed form of the stored key type
    /// - `format`: the encoding format of the pixels.
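    ///
    /// Returns the index of the slot that was reserved for the key, or an error if no slot
    /// could be reserved. A minimal sketch, assuming `pool` is a [`MultiPool`] keyed by
    /// `WlSurface` and `surface` is a `WlSurface`:
    ///
    /// ```ignore
    /// use smithay_client_toolkit::reexports::client::protocol::wl_shm::Format;
    ///
    /// match pool.insert(256, 256 * 4, 256, &surface, Format::Argb8888) {
    ///     // The index identifies the reserved slot inside the pool.
    ///     Ok(index) => println!("reserved buffer slot {index}"),
    ///     // The slot's buffer is still held by the compositor, or it overlaps another buffer.
    ///     Err(err) => eprintln!("no free slot: {err}"),
    /// }
    /// ```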
    pub fn insert<Q>(
        &mut self,
        width: i32,
        stride: i32,
        height: i32,
        key: &Q,
        format: wl_shm::Format,
    ) -> Result<usize, PoolError>
    where
        K: Borrow<Q>,
        Q: PartialEq + ToOwned<Owned = K>,
    {
        let mut offset = 0;
        let mut found_key = false;
        let size = (stride * height) as usize;
        let mut index = Err(PoolError::NotFound);

        for (i, buf_slot) in self.buffer_list.iter_mut().enumerate() {
            if buf_slot.key.borrow().eq(key) {
                found_key = true;
                if buf_slot.free.load(Ordering::Relaxed) {
                    // Destroy the buffer if it has been resized.
                    if size != buf_slot.used {
                        if let Some(buffer) = buf_slot.buffer.take() {
                            buffer.destroy();
                        }
                    }
                    // Increase the size of the buffer if it's too small and add 5% padding.
                    // It is possible this buffer overlaps the following one, but the else if
                    // branch prevents this buffer from being returned if that's the case.
                    buf_slot.size = buf_slot.size.max(size + size / 20);
                    index = Ok(i);
                } else {
                    index = Err(PoolError::InUse);
                }
            // If a buffer was resized, it is likely that the following buffers overlap it.
            } else if offset > buf_slot.offset {
                // When the buffer is free, it's safe to shift it because we know the compositor won't try to read it.
                if buf_slot.free.load(Ordering::Relaxed) {
                    if offset != buf_slot.offset {
                        if let Some(buffer) = buf_slot.buffer.take() {
                            buffer.destroy();
                        }
                    }
                    buf_slot.offset = offset;
                } else {
                    // If one of the overlapping buffers is busy, no buffer can be returned because it could result in a data race.
                    index = Err(PoolError::InUse);
                }
            } else if found_key {
                break;
            }
            // Keep each slot aligned to a 64-byte boundary when computing the next offset.
            let size = (buf_slot.size + 63) & !63;
            offset += size;
        }

        if !found_key {
            if let Err(err) = index {
                return self
                    .dyn_resize(offset, width, stride, height, key.to_owned(), format)
                    .map(|_| self.buffer_list.len() - 1)
                    .ok_or(err);
            }
        }

        index
    }

    /// Retrieves the buffer associated with the given key.
    ///
    /// The parameters are:
    ///
    /// - `width`: the width of this buffer (in pixels)
    /// - `stride`: distance (in bytes) between the beginning of a row and the next one
    /// - `height`: the height of this buffer (in pixels)
    /// - `key`: a borrowed form of the stored key type
    /// - `format`: the encoding format of the pixels.
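    ///
    /// Returns `None` if there is no buffer for the key or if its slot is too small for the
    /// requested dimensions. A minimal sketch, assuming `pool` is a [`MultiPool`] keyed by
    /// `WlSurface` and a 256x256 Argb8888 buffer was previously created for `surface`:
    ///
    /// ```ignore
    /// use smithay_client_toolkit::reexports::client::protocol::wl_shm::Format;
    ///
    /// if let Some((_offset, buffer, canvas)) = pool.get(256, 256 * 4, 256, &surface, Format::Argb8888) {
    ///     // `canvas` is the memory backing the buffer; fill it with opaque white.
    ///     canvas.fill(0xFF);
    ///     surface.attach(Some(buffer), 0, 0);
    ///     surface.commit();
    /// }
    /// ```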
    pub fn get<Q>(
        &mut self,
        width: i32,
        stride: i32,
        height: i32,
        key: &Q,
        format: wl_shm::Format,
    ) -> Option<(usize, &wl_buffer::WlBuffer, &mut [u8])>
    where
        Q: PartialEq,
        K: std::borrow::Borrow<Q>,
    {
        let len = self.inner.len();
        let size = (stride * height) as usize;
        let buf_slot =
            self.buffer_list.iter_mut().find(|buf_slot| buf_slot.key.borrow().eq(key))?;

        // The slot must be large enough to hold the requested buffer.
        if size > buf_slot.size {
            return None;
        }

        buf_slot.used = size;
        let offset = buf_slot.offset;
        if buf_slot.buffer.is_none() {
            if offset + size > len {
                self.inner.resize(offset + size + size / 20).ok()?;
            }
            let free = Arc::new(AtomicBool::new(true));
            let data = BufferObjectData { free: free.clone() };
            let buffer = self.inner.create_buffer_raw(
                offset as i32,
                width,
                height,
                stride,
                format,
                Arc::new(data),
            );
            buf_slot.free = free;
            buf_slot.buffer = Some(buffer);
        }
        let buf = buf_slot.buffer.as_ref()?;
        buf_slot.free.store(false, Ordering::Relaxed);
        Some((offset, buf, &mut self.inner.mmap()[offset..][..size]))
    }

    /// Returns the buffer associated with the given key and its offset (usize) in the mempool.
    ///
    /// The parameters are:
    ///
    /// - `width`: the width of this buffer (in pixels)
    /// - `stride`: distance (in bytes) between the beginning of a row and the next one
    /// - `height`: the height of this buffer (in pixels)
    /// - `key`: a borrowed form of the stored key type
    /// - `format`: the encoding format of the pixels.
    ///
    /// The offset can be used to determine whether or not a buffer was moved in the mempool,
    /// and consequently whether it should be damaged partially or fully.
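    ///
    /// A minimal sketch, assuming `pool` is a [`MultiPool`] keyed by `WlSurface`, `surface` is
    /// a `WlSurface`, and `last_offset` holds the offset returned for the previous frame:
    ///
    /// ```ignore
    /// use smithay_client_toolkit::reexports::client::protocol::wl_shm::Format;
    ///
    /// if let Ok((offset, buffer, canvas)) = pool.create_buffer(256, 256 * 4, 256, &surface, Format::Argb8888) {
    ///     canvas.fill(0xFF);
    ///     surface.attach(Some(buffer), 0, 0);
    ///     if offset != last_offset {
    ///         // The buffer moved inside the pool, so its previous content can't be reused:
    ///         // damage the whole surface.
    ///         surface.damage_buffer(0, 0, 256, 256);
    ///         last_offset = offset;
    ///     } else {
    ///         // Otherwise only the region that actually changed needs to be damaged.
    ///         surface.damage_buffer(0, 0, 256, 64);
    ///     }
    ///     surface.commit();
    /// }
    /// ```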
    pub fn create_buffer<Q>(
        &mut self,
        width: i32,
        stride: i32,
        height: i32,
        key: &Q,
        format: wl_shm::Format,
    ) -> Result<(usize, &wl_buffer::WlBuffer, &mut [u8]), PoolError>
    where
        K: Borrow<Q>,
        Q: PartialEq + ToOwned<Owned = K>,
    {
        let index = self.insert(width, stride, height, key, format)?;
        self.get_at(index, width, stride, height, format)
    }

    /// Retrieves the buffer at the given index.
    fn get_at(
        &mut self,
        index: usize,
        width: i32,
        stride: i32,
        height: i32,
        format: wl_shm::Format,
    ) -> Result<(usize, &wl_buffer::WlBuffer, &mut [u8]), PoolError> {
        let len = self.inner.len();
        let size = (stride * height) as usize;
        let buf_slot = self.buffer_list.get_mut(index).ok_or(PoolError::NotFound)?;

        // A request larger than the slot would overlap the next buffer.
        if size > buf_slot.size {
            return Err(PoolError::Overlap);
        }

        buf_slot.used = size;
        let offset = buf_slot.offset;
        if buf_slot.buffer.is_none() {
            if offset + size > len {
                self.inner.resize(offset + size + size / 20).map_err(|_| PoolError::Overlap)?;
            }
            let free = Arc::new(AtomicBool::new(true));
            let data = BufferObjectData { free: free.clone() };
            let buffer = self.inner.create_buffer_raw(
                offset as i32,
                width,
                height,
                stride,
                format,
                Arc::new(data),
            );
            buf_slot.free = free;
            buf_slot.buffer = Some(buffer);
        }
        buf_slot.free.store(false, Ordering::Relaxed);
        let buf = buf_slot.buffer.as_ref().unwrap();
        Ok((offset, buf, &mut self.inner.mmap()[offset..][..size]))
    }

    /// Calculates the aligned offset and the size of a buffer from its stride and height.
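    ///
    /// The offset is padded by 5% and rounded up to the next multiple of 64. For example, a
    /// requested offset of 1000 bytes becomes `1000 + 1000 / 20 = 1050`, which is then aligned
    /// up to `1088`.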
    fn offset(&self, mut offset: i32, stride: i32, height: i32) -> (usize, usize) {
        // Total size of the buffer in bytes.
        let size = stride * height;
        // Add 5% padding.
        offset += offset / 20;
        // Align the offset to a multiple of 64 bytes.
        offset = (offset + 63) & !63;
        (offset as usize, size as usize)
    }

    #[allow(clippy::too_many_arguments)]
    /// Resizes the pool and appends a new buffer.
    fn dyn_resize(
        &mut self,
        offset: usize,
        width: i32,
        stride: i32,
        height: i32,
        key: K,
        format: wl_shm::Format,
    ) -> Option<()> {
        let (offset, size) = self.offset(offset as i32, stride, height);
        if self.inner.len() < offset + size {
            self.resize(offset + size + size / 20).ok()?;
        }
        let free = Arc::new(AtomicBool::new(true));
        let data = BufferObjectData { free: free.clone() };
        let buffer = self.inner.create_buffer_raw(
            offset as i32,
            width,
            height,
            stride,
            format,
            Arc::new(data),
        );
        self.buffer_list.push(BufferSlot {
            offset,
            used: 0,
            free,
            buffer: Some(buffer),
            size,
            key,
        });
        Some(())
    }
}

struct BufferObjectData {
    free: Arc<AtomicBool>,
}

impl wayland_client::backend::ObjectData for BufferObjectData {
    fn event(
        self: Arc<Self>,
        _backend: &wayland_backend::client::Backend,
        msg: wayland_backend::protocol::Message<wayland_backend::client::ObjectId, OwnedFd>,
    ) -> Option<Arc<dyn wayland_backend::client::ObjectData>> {
        debug_assert!(wayland_client::backend::protocol::same_interface(
            msg.sender_id.interface(),
            wl_buffer::WlBuffer::interface()
        ));
        debug_assert!(msg.opcode == 0);
        // wl_buffer only has a single event: wl_buffer.release
        self.free.store(true, Ordering::Relaxed);
        None
    }

    fn destroyed(&self, _: wayland_backend::client::ObjectId) {}
}