use crate::consts::{QOI_OP_DIFF, QOI_OP_LUMA, QOI_OP_RGB, QOI_OP_RGBA};
use crate::error::Result;
use crate::utils::Writer;
use bytemuck::{cast, Pod};

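/// A single pixel with `N` channels (3 for RGB, 4 for RGBA), stored as raw bytes.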
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(transparent)]
pub struct Pixel<const N: usize>([u8; N]);

impl<const N: usize> Pixel<N> {
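    /// Creates a zero-initialized pixel (all channels set to 0).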
    #[inline]
    pub const fn new() -> Self {
        Self([0; N])
    }

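    /// Copies the `N` bytes of `s` into this pixel; callers are expected to pass
    /// exactly `N` bytes (anything else hits the `unreachable!()` branch).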
    #[inline]
    pub fn read(&mut self, s: &[u8]) {
        if s.len() == N {
            let mut i = 0;
            while i < N {
                self.0[i] = s[i];
                i += 1;
            }
        } else {
            unreachable!();
        }
    }

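    /// Copies the first `min(N, M)` channels from `px` into this pixel.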
    #[inline]
    pub fn update<const M: usize>(&mut self, px: Pixel<M>) {
        let mut i = 0;
        while i < M && i < N {
            self.0[i] = px.0[i];
            i += 1;
        }
    }

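    /// Sets the red, green and blue channels, leaving alpha (if present) untouched.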
    #[inline]
    pub fn update_rgb(&mut self, r: u8, g: u8, b: u8) {
        self.0[0] = r;
        self.0[1] = g;
        self.0[2] = b;
    }

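    /// Sets all channels; the alpha write is skipped when `N < 4`.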
    #[inline]
    pub fn update_rgba(&mut self, r: u8, g: u8, b: u8, a: u8) {
        self.0[0] = r;
        self.0[1] = g;
        self.0[2] = b;
        if N >= 4 {
            self.0[3] = a;
        }
    }

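    /// Applies a `QOI_OP_DIFF` chunk: each 2-bit field of `b1` holds a per-channel
    /// RGB delta in -2..=1 (stored with a bias of 2), added with wrapping arithmetic.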
    #[inline]
    pub fn update_diff(&mut self, b1: u8) {
        self.0[0] = self.0[0].wrapping_add((b1 >> 4) & 0x03).wrapping_sub(2);
        self.0[1] = self.0[1].wrapping_add((b1 >> 2) & 0x03).wrapping_sub(2);
        self.0[2] = self.0[2].wrapping_add(b1 & 0x03).wrapping_sub(2);
    }

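    /// Applies a `QOI_OP_LUMA` chunk: `b1` holds the green delta (6 bits, bias 32),
    /// `b2` holds the red/blue deltas relative to green (4 bits each, bias 8).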
    #[inline]
    pub fn update_luma(&mut self, b1: u8, b2: u8) {
        let vg = (b1 & 0x3f).wrapping_sub(32);
        let vg_8 = vg.wrapping_sub(8);
        let vr = vg_8.wrapping_add((b2 >> 4) & 0x0f);
        let vb = vg_8.wrapping_add(b2 & 0x0f);
        self.0[0] = self.0[0].wrapping_add(vr);
        self.0[1] = self.0[1].wrapping_add(vg);
        self.0[2] = self.0[2].wrapping_add(vb);
    }

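    /// Widens to a 4-channel pixel, using `with_a` for the alpha channel when `N < 4`.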
    #[inline]
    pub const fn as_rgba(self, with_a: u8) -> Pixel<4> {
        let mut i = 0;
        let mut out = Pixel::new();
        while i < N {
            out.0[i] = self.0[i];
            i += 1;
        }
        if N < 4 {
            out.0[3] = with_a;
        }
        out
    }

    #[inline]
    pub const fn r(self) -> u8 {
        self.0[0]
    }

    #[inline]
    pub const fn g(self) -> u8 {
        self.0[1]
    }

    #[inline]
    pub const fn b(self) -> u8 {
        self.0[2]
    }

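    /// Returns a copy with the alpha channel set to `value`; a no-op for 3-channel pixels.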
    #[inline]
    pub const fn with_a(mut self, value: u8) -> Self {
        if N >= 4 {
            self.0[3] = value;
        }
        self
    }

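    /// Returns the alpha channel, or `value` as the fallback for 3-channel pixels.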
    #[inline]
    pub const fn a_or(self, value: u8) -> u8 {
        if N < 4 {
            value
        } else {
            self.0[3]
        }
    }

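    /// Index into the 64-entry color cache: `(r * 3 + g * 5 + b * 7 + a * 11) % 64`,
    /// evaluated with a single 64-bit multiply rather than four separate multiplies;
    /// 3-channel pixels hash as if their alpha were `0xff`.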
    #[inline]
    #[allow(clippy::cast_lossless, clippy::cast_possible_truncation)]
    pub fn hash_index(self) -> u8
    where
        [u8; N]: Pod,
    {
        // credits for the initial idea: @zakarumych
        let v = if N == 4 {
            u32::from_ne_bytes(cast(self.0))
        } else {
            u32::from_ne_bytes([self.0[0], self.0[1], self.0[2], 0xff])
        } as u64;
        let s = ((v & 0xff00_ff00) << 32) | (v & 0x00ff_00ff);
        s.wrapping_mul(0x0300_0700_0005_000b_u64).to_le().swap_bytes() as u8 & 63
    }

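    /// Adds per-channel deltas to red, green and blue with wrapping arithmetic.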
    #[inline]
    pub fn rgb_add(&mut self, r: u8, g: u8, b: u8) {
        self.0[0] = self.0[0].wrapping_add(r);
        self.0[1] = self.0[1].wrapping_add(g);
        self.0[2] = self.0[2].wrapping_add(b);
    }

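    /// Encodes this pixel relative to `px_prev` as the smallest applicable chunk:
    /// `QOI_OP_DIFF` (1 byte) when all RGB deltas fit in -2..=1, `QOI_OP_LUMA` (2 bytes)
    /// when the green delta fits in -32..=31 and the red/blue deltas relative to green
    /// fit in -8..=7, otherwise `QOI_OP_RGB` (4 bytes); `QOI_OP_RGBA` (5 bytes) is used
    /// whenever the alpha channel changed. Run and index chunks are not emitted here.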
    #[inline]
    pub fn encode_into<W: Writer>(&self, px_prev: Self, buf: W) -> Result<W> {
        if N == 3 || self.a_or(0) == px_prev.a_or(0) {
            let vg = self.g().wrapping_sub(px_prev.g());
            let vg_32 = vg.wrapping_add(32);
            if vg_32 | 63 == 63 {
                let vr = self.r().wrapping_sub(px_prev.r());
                let vb = self.b().wrapping_sub(px_prev.b());
                let vg_r = vr.wrapping_sub(vg);
                let vg_b = vb.wrapping_sub(vg);
                let (vr_2, vg_2, vb_2) =
                    (vr.wrapping_add(2), vg.wrapping_add(2), vb.wrapping_add(2));
                if vr_2 | vg_2 | vb_2 | 3 == 3 {
                    buf.write_one(QOI_OP_DIFF | vr_2 << 4 | vg_2 << 2 | vb_2)
                } else {
                    let (vg_r_8, vg_b_8) = (vg_r.wrapping_add(8), vg_b.wrapping_add(8));
                    if vg_r_8 | vg_b_8 | 15 == 15 {
                        buf.write_many(&[QOI_OP_LUMA | vg_32, vg_r_8 << 4 | vg_b_8])
                    } else {
                        buf.write_many(&[QOI_OP_RGB, self.r(), self.g(), self.b()])
                    }
                }
            } else {
                buf.write_many(&[QOI_OP_RGB, self.r(), self.g(), self.b()])
            }
        } else {
            buf.write_many(&[QOI_OP_RGBA, self.r(), self.g(), self.b(), self.a_or(0xff)])
        }
    }
}

impl<const N: usize> From<Pixel<N>> for [u8; N] {
    #[inline(always)]
    fn from(px: Pixel<N>) -> Self {
        px.0
    }
}

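/// Marker trait for the channel counts supported by QOI (3 = RGB, 4 = RGBA).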
pub trait SupportedChannels {}

impl SupportedChannels for Pixel<3> {}
impl SupportedChannels for Pixel<4> {}