use super::pixel_format::blend;
use super::PixelFormat;
use crate::BitMapBackend;
use plotters_backend::DrawingBackend;

/// The marker type that indicates we are currently using a BGRX8888 pixel format.
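/// Every pixel occupies four bytes laid out as `[b, g, r, x]`; the padding
/// byte `x` carries no color information.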
pub struct BGRXPixel;

impl PixelFormat for BGRXPixel {
    const PIXEL_SIZE: usize = 4;
    const EFFECTIVE_PIXEL_SIZE: usize = 3;

    #[inline(always)]
    fn byte_at(r: u8, g: u8, b: u8, _a: u64, idx: usize) -> u8 {
        match idx {
            0 => b,
            1 => g,
            2 => r,
            _ => 0xff,
        }
    }

    #[inline(always)]
    fn decode_pixel(data: &[u8]) -> (u8, u8, u8, u64) {
        // Report full opacity (0xff); the X byte carries no real alpha
        (data[2], data[1], data[0], 0xff)
    }

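    // Blends an axis-aligned rectangle into the frame buffer. Pairs of pixels
    // are processed as single u64 words via the mask-and-multiply trick below;
    // any trailing pixels fall back to the scalar `blend` helper.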
    #[allow(clippy::many_single_char_names, clippy::cast_ptr_alignment)]
    fn blend_rect_fast(
        target: &mut BitMapBackend<'_, Self>,
        upper_left: (i32, i32),
        bottom_right: (i32, i32),
        r: u8,
        g: u8,
        b: u8,
        a: f64,
    ) {
        let (w, h) = target.get_size();
        let a = a.clamp(0.0, 1.0);
        if a == 0.0 {
            return;
        }

        let (x0, y0) = (
            upper_left.0.min(bottom_right.0).max(0),
            upper_left.1.min(bottom_right.1).max(0),
        );
        let (x1, y1) = (
            upper_left.0.max(bottom_right.0).min(w as i32),
            upper_left.1.max(bottom_right.1).min(h as i32),
        );

        // The clamped rectangle is empty when the requested one lies entirely
        // outside the drawing area; in that case there is nothing to blend.
        if x0 >= x1 || y0 >= y1 {
            return;
        }

        let dst = target.get_raw_pixel_buffer();

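        // Scaling alpha into 0..=256 (instead of 0..=255) lets the blend loop
        // below use a cheap shift right by 8 in place of a division by 255.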
        let a = (256.0 * a).floor() as u64;

        // Since the RGB payload always occupies the logically lower bits of
        // each 16-bit lane, this type punning works on both LE and BE CPUs.
        #[rustfmt::skip]
        let p: u64 = unsafe {
            std::mem::transmute([
                u16::from(b), u16::from(r), u16::from(b), u16::from(r), // QW1
            ])
        };

        #[rustfmt::skip]
        let q: u64 = unsafe {
            std::mem::transmute([
                u16::from(g), 0u16, u16::from(g), 0u16, // QW1
            ])
        };

        const N: u64 = 0xff00_ff00_ff00_ff00;
        const M: u64 = 0x00ff_00ff_00ff_00ff;
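
        // Two adjacent BGRX pixels occupy eight bytes [b, g, r, x, b, g, r, x].
        // `d1 & M` and `(d1 >> 8) & M` below split them into four 16-bit lanes
        // holding the even- and odd-offset bytes (which group lands where
        // depends on endianness, hence the two cfg branches). `p` carries the
        // [b, r, b, r] bytes and `q` the [g, 0, g, 0] bytes, so the padding
        // byte is pulled toward zero. Each lane is then blended as
        //     out = (dst * (256 - a) + src * a) >> 8
        // where `& N` keeps the high byte of a lane, i.e. the result times 256.
        // Example: dst = 0x00, src = 0xff, a = 128 gives (255 * 128) >> 8 = 127.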

        for y in y0..y1 {
            let start = (y * w as i32 + x0) as usize;
            let count = (x1 - x0) as usize;

            let start_ptr = &mut dst[start * Self::PIXEL_SIZE] as *mut u8 as *mut [u8; 8];
            let slice = unsafe { std::slice::from_raw_parts_mut(start_ptr, (count - 1) / 2) };
            for rp in slice.iter_mut() {
                let ptr = rp as *mut [u8; 8] as *mut u64;
                let d1 = unsafe { ptr.read_unaligned() };
                let mut h = (d1 >> 8) & M;
                let mut l = d1 & M;

                #[cfg(target_endian = "little")]
                {
                    h = (h * (256 - a) + q * a) & N;
                    l = ((l * (256 - a) + p * a) & N) >> 8;
                }

                #[cfg(target_endian = "big")]
                {
                    h = (h * (256 - a) + p * a) & N;
                    l = ((l * (256 - a) + q * a) & N) >> 8;
                }

                unsafe {
                    ptr.write_unaligned(h | l);
                }
            }

            let mut iter = dst[((start + slice.len() * 2) * Self::PIXEL_SIZE)
                ..((start + count) * Self::PIXEL_SIZE)]
                .iter_mut();
            for _ in (slice.len() * 2)..count {
                blend(iter.next().unwrap(), b, a);
                blend(iter.next().unwrap(), g, a);
                blend(iter.next().unwrap(), r, a);
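                // The fourth byte of the pixel is padding; skip it unchanged.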
                iter.next();
            }
        }
    }

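    // Fills an axis-aligned rectangle. Gray fills (r == g == b) degenerate to
    // a plain byte fill; otherwise pairs of pixels are written as single
    // 8-byte stores, with a scalar path for narrow rects and trailing pixels.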
    #[allow(clippy::many_single_char_names, clippy::cast_ptr_alignment)]
    fn fill_rect_fast(
        target: &mut BitMapBackend<'_, Self>,
        upper_left: (i32, i32),
        bottom_right: (i32, i32),
        r: u8,
        g: u8,
        b: u8,
    ) {
        let (w, h) = target.get_size();
        let (x0, y0) = (
            upper_left.0.min(bottom_right.0).max(0),
            upper_left.1.min(bottom_right.1).max(0),
        );
        let (x1, y1) = (
            upper_left.0.max(bottom_right.0).min(w as i32),
            upper_left.1.max(bottom_right.1).min(h as i32),
        );

        // The clamped rectangle is empty when the requested one lies entirely
        // outside the drawing area; in that case there is nothing to fill.
        if x0 >= x1 || y0 >= y1 {
            return;
        }

        let dst = target.get_raw_pixel_buffer();

        if r == g && g == b {
            // If r == g == b, then we can use memset
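            // (this also overwrites the padding byte with the gray value,
            // which is harmless since the X byte carries no color info)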
            if x0 != 0 || x1 != w as i32 {
                // If the rows are only partially covered, we have to memset
                // row by row
                for y in y0..y1 {
                    let start = (y * w as i32 + x0) as usize;
                    let count = (x1 - x0) as usize;
                    dst[(start * Self::PIXEL_SIZE)..((start + count) * Self::PIXEL_SIZE)]
                        .iter_mut()
                        .for_each(|e| *e = r);
                }
            } else {
                // If the entire memory block is going to be filled, use a single memset
                dst[Self::PIXEL_SIZE * (y0 * w as i32) as usize
                    ..(y1 * w as i32) as usize * Self::PIXEL_SIZE]
                    .iter_mut()
                    .for_each(|e| *e = r);
            }
        } else {
            let count = (x1 - x0) as usize;
            if count < 8 {
                for y in y0..y1 {
                    let start = (y * w as i32 + x0) as usize;
                    let mut iter = dst
                        [(start * Self::PIXEL_SIZE)..((start + count) * Self::PIXEL_SIZE)]
                        .iter_mut();
                    for _ in 0..count {
                        *iter.next().unwrap() = b;
                        *iter.next().unwrap() = g;
                        *iter.next().unwrap() = r;
                        iter.next();
                    }
                }
            } else {
                for y in y0..y1 {
                    let start = (y * w as i32 + x0) as usize;
                    let start_ptr = &mut dst[start * Self::PIXEL_SIZE] as *mut u8 as *mut [u8; 8];
                    let slice =
                        unsafe { std::slice::from_raw_parts_mut(start_ptr, (count - 1) / 2) };
                    for p in slice.iter_mut() {
                        // Each iteration fills two 4-byte pixels with a single
                        // 8-byte unaligned store.
                        // TODO: Consider using AVX instructions when possible
                        let ptr = p as *mut [u8; 8] as *mut u64;
                        unsafe {
                            let d: u64 = std::mem::transmute([
                                b, g, r, 0, b, g, r, 0, // QW1
                            ]);
                            ptr.write_unaligned(d);
                        }
                    }

                    for idx in (slice.len() * 2)..count {
                        dst[start * Self::PIXEL_SIZE + idx * Self::PIXEL_SIZE] = b;
                        dst[start * Self::PIXEL_SIZE + idx * Self::PIXEL_SIZE + 1] = g;
                        dst[start * Self::PIXEL_SIZE + idx * Self::PIXEL_SIZE + 2] = r;
                    }
                }
            }
        }
    }
}
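
// A minimal test sketch added for illustration, not part of the upstream
// module. It assumes this file lives in plotters-bitmap, so `BGRXPixel`,
// `PixelFormat` and `BitMapBackend::with_buffer_and_format` (the constructor
// used elsewhere in the crate for custom pixel formats) are reachable here.
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_bgrx_byte_layout() {
        // `byte_at` should encode a pixel as [b, g, r, 0xff].
        let bytes: Vec<u8> = (0..BGRXPixel::PIXEL_SIZE)
            .map(|idx| BGRXPixel::byte_at(0x11, 0x22, 0x33, 0, idx))
            .collect();
        assert_eq!(bytes, vec![0x33, 0x22, 0x11, 0xff]);

        // `decode_pixel` should recover (r, g, b) from the same bytes.
        let (r, g, b, _a) = BGRXPixel::decode_pixel(&bytes);
        assert_eq!((r, g, b), (0x11, 0x22, 0x33));
    }

    #[test]
    fn test_fill_rect_fast() {
        // 10x10 image, 4 bytes per pixel.
        let mut buf = vec![0u8; 4 * 10 * 10];
        {
            let mut backend =
                BitMapBackend::<BGRXPixel>::with_buffer_and_format(&mut buf, (10, 10))
                    .expect("buffer is large enough for a 10x10 BGRX image");
            BGRXPixel::fill_rect_fast(&mut backend, (0, 0), (10, 10), 1, 2, 3);
        }
        // Every pixel should now start with the bytes [b, g, r] = [3, 2, 1].
        for pixel in buf.chunks(4) {
            assert_eq!(pixel[..3], [3, 2, 1]);
        }
    }
}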