// Copyright (C) 2016 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#ifndef QDRAWINGPRIMITIVE_SSE2_P_H
#define QDRAWINGPRIMITIVE_SSE2_P_H

#include <QtGui/private/qtguiglobal_p.h>
#include <private/qsimd_p.h>
#include "qdrawhelper_x86_p.h"
#include "qrgba64_p.h"

#ifdef __SSE2__

//
// W A R N I N G
// -------------
//
// This file is not part of the Qt API. It exists purely as an
// implementation detail. This header file may change from version to
// version without notice, or even be removed.
//
// We mean it.
//

QT_BEGIN_NAMESPACE

/*
 * Multiply the components of pixelVector by alphaChannel
 * Each 32-bit component of alphaChannel must be of the form 0x00AA00AA
 * colorMask must hold 0x00ff00ff in each 32-bit component
 * half must hold the value 128 (0x80) in each 16-bit component
 */
#define BYTE_MUL_SSE2(result, pixelVector, alphaChannel, colorMask, half) \
{ \
    /* 1. separate the colors into 2 vectors so each channel is on 16 bits \
       (in order to be multiplied by the alpha: \
       each 32 bits of pixelVectorAG are in the form 0x00AA00GG, \
       each 32 bits of pixelVectorRB are in the form 0x00RR00BB) */\
    __m128i pixelVectorAG = _mm_srli_epi16(pixelVector, 8); \
    __m128i pixelVectorRB = _mm_and_si128(pixelVector, colorMask); \
    \
    /* 2. multiply the vectors by the alpha channel */\
    pixelVectorAG = _mm_mullo_epi16(pixelVectorAG, alphaChannel); \
    pixelVectorRB = _mm_mullo_epi16(pixelVectorRB, alphaChannel); \
    \
    /* 3. divide by 255, that's the tricky part. \
       We do it as in BYTE_MUL(), with bit shifts: X/255 ~= (X + X/256 + rounding)/256 */ \
    /* so first compute (X + X/256 + rounding) */\
    pixelVectorRB = _mm_add_epi16(pixelVectorRB, _mm_srli_epi16(pixelVectorRB, 8)); \
    pixelVectorRB = _mm_add_epi16(pixelVectorRB, half); \
    pixelVectorAG = _mm_add_epi16(pixelVectorAG, _mm_srli_epi16(pixelVectorAG, 8)); \
    pixelVectorAG = _mm_add_epi16(pixelVectorAG, half); \
    \
    /* then divide by 256 */\
    pixelVectorRB = _mm_srli_epi16(pixelVectorRB, 8); \
    /* for AG, we could >> 8 to divide, followed by << 8 to put the \
       bytes back in the correct position. By masking instead, we execute \
       only one instruction */\
    pixelVectorAG = _mm_andnot_si128(colorMask, pixelVectorAG); \
    \
    /* 4. combine the 2 pairs of colors */ \
    result = _mm_or_si128(pixelVectorAG, pixelVectorRB); \
}

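// Editor's illustration (an addition, not part of the original Qt header):
// step 3 above is the vector analogue of the scalar rounding division used by
// BYTE_MUL() / qt_div_255(). A minimal scalar sketch of the same identity,
// valid for products x in [0, 255*255]:
static inline uint qt_div255_sketch(uint x)
{
    x += x >> 8;    // X + X/256
    x += 0x80;      // + rounding
    return x >> 8;  // / 256 ~= X/255, rounded to nearest
}
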
/*
 * Each 32-bit component of alphaChannel must be of the form 0x00AA00AA
 * oneMinusAlphaChannel must hold 255 - alpha in the same layout
 * colorMask must hold 0x00ff00ff in each 32-bit component
 * half must hold the value 128 (0x80) in each 16-bit component
 */
#define INTERPOLATE_PIXEL_255_SSE2(result, srcVector, dstVector, alphaChannel, oneMinusAlphaChannel, colorMask, half) { \
    /* interpolate AG */\
    __m128i srcVectorAG = _mm_srli_epi16(srcVector, 8); \
    __m128i dstVectorAG = _mm_srli_epi16(dstVector, 8); \
    __m128i srcVectorAGalpha = _mm_mullo_epi16(srcVectorAG, alphaChannel); \
    __m128i dstVectorAGoneMinusAlpha = _mm_mullo_epi16(dstVectorAG, oneMinusAlphaChannel); \
    __m128i finalAG = _mm_add_epi16(srcVectorAGalpha, dstVectorAGoneMinusAlpha); \
    finalAG = _mm_add_epi16(finalAG, _mm_srli_epi16(finalAG, 8)); \
    finalAG = _mm_add_epi16(finalAG, half); \
    finalAG = _mm_andnot_si128(colorMask, finalAG); \
    \
    /* interpolate RB */\
    __m128i srcVectorRB = _mm_and_si128(srcVector, colorMask); \
    __m128i dstVectorRB = _mm_and_si128(dstVector, colorMask); \
    __m128i srcVectorRBalpha = _mm_mullo_epi16(srcVectorRB, alphaChannel); \
    __m128i dstVectorRBoneMinusAlpha = _mm_mullo_epi16(dstVectorRB, oneMinusAlphaChannel); \
    __m128i finalRB = _mm_add_epi16(srcVectorRBalpha, dstVectorRBoneMinusAlpha); \
    finalRB = _mm_add_epi16(finalRB, _mm_srli_epi16(finalRB, 8)); \
    finalRB = _mm_add_epi16(finalRB, half); \
    finalRB = _mm_srli_epi16(finalRB, 8); \
    \
    /* combine */\
    result = _mm_or_si128(finalAG, finalRB); \
}

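// Editor's illustration (an addition, not part of the original Qt header):
// per channel, the macro above computes the classic 255-based interpolation
// (s * a + d * (255 - a)) / 255, using the same rounding trick. Scalar sketch:
static inline uint interpolate255_sketch(uint s, uint d, uint a)
{
    uint x = s * a + d * (255 - a);    // 16-bit intermediate per channel
    return (x + (x >> 8) + 0x80) >> 8; // / 255, rounded to nearest
}
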
// Same as BLEND_SOURCE_OVER_ARGB32_SSE2, but for a single vector srcVector
#define BLEND_SOURCE_OVER_ARGB32_SSE2_helper(dst, srcVector, nullVector, half, one, colorMask, alphaMask) { \
    const __m128i srcVectorAlpha = _mm_and_si128(srcVector, alphaMask); \
    if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVectorAlpha, alphaMask)) == 0xffff) { \
        /* all opaque */ \
        _mm_store_si128((__m128i *)&dst[x], srcVector); \
    } else if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVectorAlpha, nullVector)) != 0xffff) { \
        /* not fully transparent */ \
        /* extract the alpha channel on 2 x 16 bits */ \
        /* so we have room for the multiplication */ \
        /* each 32 bits will be in the form 0x00AA00AA */ \
        /* with AA being 255 - alpha */ \
        __m128i alphaChannel = _mm_srli_epi32(srcVector, 24); \
        alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16)); \
        alphaChannel = _mm_sub_epi16(one, alphaChannel); \
        \
        const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]); \
        __m128i destMultipliedByOneMinusAlpha; \
        BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half); \
        \
        /* result = s + d * (1-alpha) */\
        const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha); \
        _mm_store_si128((__m128i *)&dst[x], result); \
    } \
}


// Basically blend src over dst (no extra constant alpha).
// nullVector, half, one, colorMask and alphaMask are constant across the whole image/texture, and should be defined as:
//const __m128i nullVector = _mm_set1_epi32(0);
//const __m128i half = _mm_set1_epi16(0x80);
//const __m128i one = _mm_set1_epi16(0xff);
//const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
//const __m128i alphaMask = _mm_set1_epi32(0xff000000);
//
// The computation being done is:
// result = s + d * (1-alpha)
// with shortcuts if fully opaque or fully transparent.
#define BLEND_SOURCE_OVER_ARGB32_SSE2(dst, src, length, nullVector, half, one, colorMask, alphaMask) { \
    int x = 0; \
    \
    /* First, get dst aligned. */ \
    ALIGNMENT_PROLOGUE_16BYTES(dst, x, length) { \
        blend_pixel(dst[x], src[x]); \
    } \
    \
    for (; x < length-3; x += 4) { \
        const __m128i srcVector = _mm_loadu_si128((const __m128i *)&src[x]); \
        BLEND_SOURCE_OVER_ARGB32_SSE2_helper(dst, srcVector, nullVector, half, one, colorMask, alphaMask) \
    } \
    SIMD_EPILOGUE(x, length, 3) { \
        blend_pixel(dst[x], src[x]); \
    } \
}

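// Editor's usage sketch (an addition, not part of the original Qt header):
// a caller such as qt_blend_argb32_on_argb32() in qdrawhelper_sse2.cpp sets up
// the constants once and expands the macro per scanline, roughly:
//
//     const __m128i nullVector = _mm_set1_epi32(0);
//     const __m128i half = _mm_set1_epi16(0x80);
//     const __m128i one = _mm_set1_epi16(0xff);
//     const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
//     const __m128i alphaMask = _mm_set1_epi32(0xff000000);
//     BLEND_SOURCE_OVER_ARGB32_SSE2(dst, src, length,
//                                   nullVector, half, one, colorMask, alphaMask);
//
// blend_pixel(), ALIGNMENT_PROLOGUE_16BYTES and SIMD_EPILOGUE must be in scope
// at the expansion site (they live in qdrawhelper_p.h / qsimd_p.h).
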
// Basically blend src over dst with the const alpha defined as constAlphaVector.
// nullVector, half, one, colorMask are constant across the whole image/texture, and should be defined as:
//const __m128i nullVector = _mm_set1_epi32(0);
//const __m128i half = _mm_set1_epi16(0x80);
//const __m128i one = _mm_set1_epi16(0xff);
//const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
//
// The computation being done is:
// dest = (s + d * sia) * ca + d * cia
//      = s * ca + d * (sia * ca + cia)
//      = s * ca + d * (1 - sa*ca)
#define BLEND_SOURCE_OVER_ARGB32_WITH_CONST_ALPHA_SSE2(dst, src, length, nullVector, half, one, colorMask, constAlphaVector) \
{ \
    int x = 0; \
    \
    ALIGNMENT_PROLOGUE_16BYTES(dst, x, length) { \
        blend_pixel(dst[x], src[x], const_alpha); \
    } \
    \
    for (; x < length-3; x += 4) { \
        __m128i srcVector = _mm_loadu_si128((const __m128i *)&src[x]); \
        if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVector, nullVector)) != 0xffff) { \
            BYTE_MUL_SSE2(srcVector, srcVector, constAlphaVector, colorMask, half); \
            \
            __m128i alphaChannel = _mm_srli_epi32(srcVector, 24); \
            alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16)); \
            alphaChannel = _mm_sub_epi16(one, alphaChannel); \
            \
            const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]); \
            __m128i destMultipliedByOneMinusAlpha; \
            BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half); \
            \
            const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha); \
            _mm_store_si128((__m128i *)&dst[x], result); \
        } \
    } \
    SIMD_EPILOGUE(x, length, 3) { \
        blend_pixel(dst[x], src[x], const_alpha); \
    } \
}

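// Editor's note (an addition, not part of the original Qt header): the macro
// above additionally expects a scalar const_alpha in scope for the scalar
// prologue/epilogue; the matching vector is built from it by the caller,
// roughly:
//
//     const __m128i constAlphaVector = _mm_set1_epi16(const_alpha);
//
// where const_alpha is the 0..255 constant opacity applied to the source.
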
QT_END_NAMESPACE

#endif // __SSE2__

QT_BEGIN_NAMESPACE
#if QT_COMPILER_SUPPORTS_HERE(SSE4_1)
QT_FUNCTION_TARGET(SSE2)
static inline void Q_DECL_VECTORCALL reciprocal_mul_ss(__m128 &ia, const __m128 a, float mul)
{
    ia = _mm_rcp_ss(a); // Approximate 1/a
    // Improve precision of ia with one Newton-Raphson step: ia' = ia * (2 - a*ia)
    ia = _mm_sub_ss(_mm_add_ss(ia, ia), _mm_mul_ss(ia, _mm_mul_ss(ia, a)));
    ia = _mm_mul_ss(ia, _mm_set_ss(mul)); // Scale: ia = mul / a
    ia = _mm_shuffle_ps(ia, ia, _MM_SHUFFLE(0,0,0,0)); // Broadcast to all four lanes
}

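// Editor's illustration (an addition, not part of the original Qt header):
// the same refinement in scalar form; one iteration of x' = x * (2 - a*x)
// roughly doubles the ~12-bit accuracy of the hardware reciprocal estimate.
static inline float reciprocal_mul_sketch(float a, float mul)
{
    float x = 1.0f / a;      // stand-in for the _mm_rcp_ss estimate
    x = x * (2.0f - a * x);  // Newton-Raphson step (a no-op when x is exact)
    return x * mul;          // mul / a
}
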
QT_FUNCTION_TARGET(SSE4_1)
static inline QRgb qUnpremultiply_sse4(QRgb p)
{
    const uint alpha = qAlpha(p);
    if (alpha == 255)
        return p;
    if (alpha == 0)
        return 0;
    const __m128 va = _mm_set1_ps(alpha);
    __m128 via;
    reciprocal_mul_ss(via, va, 255.0f); // Approximate 1/a
    __m128i vl = _mm_cvtepu8_epi32(_mm_cvtsi32_si128(p));
    vl = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(vl), via));
    vl = _mm_packus_epi32(vl, vl);
    vl = _mm_insert_epi16(vl, alpha, 3);
    vl = _mm_packus_epi16(vl, vl);
    return _mm_cvtsi128_si32(vl);
}

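// Editor's illustration (an addition, not part of the original Qt header):
// per channel, the function above computes c * 255 / alpha with rounding.
// A plain scalar sketch of the same operation:
static inline uint unpremul_channel_sketch(uint c, uint alpha) // alpha != 0
{
    return (c * 255u + alpha / 2) / alpha; // rounded to nearest
}
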
template<enum QtPixelOrder PixelOrder>
QT_FUNCTION_TARGET(SSE4_1)
static inline uint qConvertArgb32ToA2rgb30_sse4(QRgb p)
{
    const uint alpha = qAlpha(p);
    if (alpha == 255)
        return qConvertRgb32ToRgb30<PixelOrder>(p);
    if (alpha == 0)
        return 0;
    constexpr float mult = 1023.0f / (255 >> 6);
    const uint newalpha = (alpha >> 6);
    const __m128 va = _mm_set1_ps(alpha);
    __m128 via;
    reciprocal_mul_ss(via, va, mult * newalpha);
    __m128i vl = _mm_cvtsi32_si128(p);
    vl = _mm_cvtepu8_epi32(vl);
    vl = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(vl), via));
    vl = _mm_packus_epi32(vl, vl);
    uint rgb30 = (newalpha << 30);
    rgb30 |= ((uint)_mm_extract_epi16(vl, 1)) << 10;
    if (PixelOrder == PixelOrderRGB) {
        rgb30 |= ((uint)_mm_extract_epi16(vl, 2)) << 20;
        rgb30 |= ((uint)_mm_extract_epi16(vl, 0));
    } else {
        rgb30 |= ((uint)_mm_extract_epi16(vl, 0)) << 20;
        rgb30 |= ((uint)_mm_extract_epi16(vl, 2));
    }
    return rgb30;
}

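// Editor's note (an addition, not part of the original Qt header): the 30-bit
// value packed above is, from the most significant bit down,
//   2-bit alpha (31:30) | 10 bits (29:20) | 10 bits (19:10) | 10 bits (9:0)
// with R in bits 29:20 and B in 9:0 for PixelOrderRGB (swapped for BGR); the
// 0..255 channels are rescaled to 0..1023 against the truncated 2-bit alpha
// via the mult * newalpha factor.
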
template<enum QtPixelOrder PixelOrder>
QT_FUNCTION_TARGET(SSE4_1)
static inline uint qConvertRgba64ToRgb32_sse4(QRgba64 p)
{
    if (p.isTransparent())
        return 0;
    __m128i vl = _mm_loadl_epi64(reinterpret_cast<const __m128i *>(&p));
    if (!p.isOpaque()) {
        const __m128 va = _mm_set1_ps(p.alpha());
        __m128 via;
        reciprocal_mul_ss(via, va, 65535.0f);
        vl = _mm_unpacklo_epi16(vl, _mm_setzero_si128());
        vl = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(vl), via));
        vl = _mm_packus_epi32(vl, vl);
        vl = _mm_insert_epi16(vl, p.alpha(), 3);
    }
    if (PixelOrder == PixelOrderBGR)
        vl = _mm_shufflelo_epi16(vl, _MM_SHUFFLE(3, 0, 1, 2));
    return toArgb32(vl);
}
#endif
QT_END_NAMESPACE

#endif // QDRAWINGPRIMITIVE_SSE2_P_H