// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

#define vboolf vboolf_impl
#define vboold vboold_impl
#define vint vint_impl
#define vuint vuint_impl
#define vllong vllong_impl
#define vfloat vfloat_impl
#define vdouble vdouble_impl

namespace embree
{
  /* 4-wide SSE float type */
  template<>
  struct vfloat<4>
  {
    ALIGNED_STRUCT_(16);

    typedef vboolf4 Bool;
    typedef vint4   Int;
    typedef vfloat4 Float;

    enum  { size = 4 };                        // number of SIMD elements
    union { __m128 v; float f[4]; int i[4]; }; // data

    ////////////////////////////////////////////////////////////////////////////////
    /// Constructors, Assignment & Cast Operators
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vfloat() {}
    __forceinline vfloat(const vfloat4& other) { v = other.v; }
    __forceinline vfloat4& operator =(const vfloat4& other) { v = other.v; return *this; }

    __forceinline vfloat(__m128 a) : v(a) {}
    __forceinline operator const __m128&() const { return v; }
    __forceinline operator       __m128&()       { return v; }

    __forceinline vfloat(float a) : v(_mm_set1_ps(a)) {}
    __forceinline vfloat(float a, float b, float c, float d) : v(_mm_set_ps(d, c, b, a)) {}

    __forceinline explicit vfloat(const vint4& a) : v(_mm_cvtepi32_ps(a)) {}
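    /* SSE has no direct unsigned-to-float conversion, so convert the low 31
       bits through the signed path and add back 2^31 (float bit pattern
       0x4F000000), selected via an arithmetic-shift mask of the sign bit. */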
    __forceinline explicit vfloat(const vuint4& x) {
      const __m128i a   = _mm_and_si128(x,_mm_set1_epi32(0x7FFFFFFF));
      const __m128i b   = _mm_and_si128(_mm_srai_epi32(x,31),_mm_set1_epi32(0x4F000000)); //0x4F000000 = 2^31
      const __m128  af  = _mm_cvtepi32_ps(a);
      const __m128  bf  = _mm_castsi128_ps(b);
      v  = _mm_add_ps(af,bf);
    }

    ////////////////////////////////////////////////////////////////////////////////
    /// Constants
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vfloat(ZeroTy)   : v(_mm_setzero_ps()) {}
    __forceinline vfloat(OneTy)    : v(_mm_set1_ps(1.0f)) {}
    __forceinline vfloat(PosInfTy) : v(_mm_set1_ps(pos_inf)) {}
    __forceinline vfloat(NegInfTy) : v(_mm_set1_ps(neg_inf)) {}
    __forceinline vfloat(StepTy)   : v(_mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f)) {}
    __forceinline vfloat(NaNTy)    : v(_mm_set1_ps(nan)) {}
    __forceinline vfloat(UndefinedTy) : v(_mm_undefined_ps()) {}

    ////////////////////////////////////////////////////////////////////////////////
    /// Loads and Stores
    ////////////////////////////////////////////////////////////////////////////////

    static __forceinline vfloat4 load (const void* a) { return _mm_load_ps((float*)a); }
    static __forceinline vfloat4 loadu(const void* a) { return _mm_loadu_ps((float*)a); }

    static __forceinline void store (void* ptr, const vfloat4& v) { _mm_store_ps((float*)ptr,v); }
    static __forceinline void storeu(void* ptr, const vfloat4& v) { _mm_storeu_ps((float*)ptr,v); }

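    /* Masked loads/stores in three tiers: AVX-512VL uses native masked moves,
       AVX uses maskload/maskstore, and plain SSE emulates them with a bitwise
       AND on load and a select plus full read-modify-write on store. */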
#if defined(__AVX512VL__)

    static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return _mm_mask_load_ps (_mm_setzero_ps(),mask,(float*)ptr); }
    static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return _mm_mask_loadu_ps(_mm_setzero_ps(),mask,(float*)ptr); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vfloat4& v) { _mm_mask_store_ps ((float*)ptr,mask,v); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vfloat4& v) { _mm_mask_storeu_ps((float*)ptr,mask,v); }
#elif defined(__AVX__)
    static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return _mm_maskload_ps((float*)ptr,mask); }
    static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return _mm_maskload_ps((float*)ptr,mask); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vfloat4& v) { _mm_maskstore_ps((float*)ptr,(__m128i)mask,v); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vfloat4& v) { _mm_maskstore_ps((float*)ptr,(__m128i)mask,v); }
#else
    static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return _mm_and_ps(_mm_load_ps ((float*)ptr),mask); }
    static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return _mm_and_ps(_mm_loadu_ps((float*)ptr),mask); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vfloat4& v) { store (ptr,select(mask,v,load (ptr))); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vfloat4& v) { storeu(ptr,select(mask,v,loadu(ptr))); }
#endif

#if defined(__AVX__)
    static __forceinline vfloat4 broadcast(const void* a) { return _mm_broadcast_ss((float*)a); }
#else
    static __forceinline vfloat4 broadcast(const void* a) { return _mm_set1_ps(*(float*)a); }
#endif

    static __forceinline vfloat4 load_nt (const float* ptr) {
#if defined (__SSE4_1__)
      return _mm_castsi128_ps(_mm_stream_load_si128((__m128i*)ptr));
#else
      return _mm_load_ps(ptr);
#endif
    }

#if defined(__SSE4_1__)
    static __forceinline vfloat4 load(const char* ptr) {
      return _mm_cvtepi32_ps(_mm_cvtepi8_epi32(_mm_loadu_si128((__m128i*)ptr)));
    }
#else
    static __forceinline vfloat4 load(const char* ptr) {
      return vfloat4(ptr[0],ptr[1],ptr[2],ptr[3]);
    }
#endif

#if defined(__SSE4_1__)
    static __forceinline vfloat4 load(const unsigned char* ptr) {
      return _mm_cvtepi32_ps(_mm_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr)));
    }
#else
    static __forceinline vfloat4 load(const unsigned char* ptr) {
      //return _mm_cvtpu8_ps(*(__m64*)ptr); // don't enable, will use MMX instructions
      return vfloat4(ptr[0],ptr[1],ptr[2],ptr[3]);
    }
#endif

#if defined(__SSE4_1__)
    static __forceinline vfloat4 load(const short* ptr) {
      return _mm_cvtepi32_ps(_mm_cvtepi16_epi32(_mm_loadu_si128((__m128i*)ptr)));
    }
#else
    static __forceinline vfloat4 load(const short* ptr) {
      return vfloat4(ptr[0],ptr[1],ptr[2],ptr[3]);
    }
#endif

    static __forceinline vfloat4 load(const unsigned short* ptr) {
      return _mm_mul_ps(vfloat4(vint4::load(ptr)),vfloat4(1.0f/65535.0f));
    }

    static __forceinline void store_nt(void* ptr, const vfloat4& v)
    {
#if defined (__SSE4_1__)
      _mm_stream_ps((float*)ptr,v);
#else
      _mm_store_ps((float*)ptr,v);
#endif
    }

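    /* Gather four floats from ptr + scale*index[i]. AVX2/AVX-512VL provide
       hardware gathers; the fallback issues four scalar loads, and the masked
       variant skips inactive lanes. */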
    template<int scale = 4>
    static __forceinline vfloat4 gather(const float* ptr, const vint4& index) {
#if defined(__AVX2__)
      return _mm_i32gather_ps(ptr, index, scale);
#else
      return vfloat4(
        *(float*)(((char*)ptr)+scale*index[0]),
        *(float*)(((char*)ptr)+scale*index[1]),
        *(float*)(((char*)ptr)+scale*index[2]),
        *(float*)(((char*)ptr)+scale*index[3]));
#endif
    }

    template<int scale = 4>
    static __forceinline vfloat4 gather(const vboolf4& mask, const float* ptr, const vint4& index) {
      vfloat4 r = zero;
#if defined(__AVX512VL__)
      return _mm_mmask_i32gather_ps(r, mask, index, ptr, scale);
#elif defined(__AVX2__)
      return _mm_mask_i32gather_ps(r, ptr, index, mask, scale);
#else
      if (likely(mask[0])) r[0] = *(float*)(((char*)ptr)+scale*index[0]);
      if (likely(mask[1])) r[1] = *(float*)(((char*)ptr)+scale*index[1]);
      if (likely(mask[2])) r[2] = *(float*)(((char*)ptr)+scale*index[2]);
      if (likely(mask[3])) r[3] = *(float*)(((char*)ptr)+scale*index[3]);
      return r;
#endif
    }

    template<int scale = 4>
    static __forceinline void scatter(void* ptr, const vint4& index, const vfloat4& v)
    {
#if defined(__AVX512VL__)
      _mm_i32scatter_ps((float*)ptr, index, v, scale);
#else
      *(float*)(((char*)ptr)+scale*index[0]) = v[0];
      *(float*)(((char*)ptr)+scale*index[1]) = v[1];
      *(float*)(((char*)ptr)+scale*index[2]) = v[2];
      *(float*)(((char*)ptr)+scale*index[3]) = v[3];
#endif
    }

    template<int scale = 4>
    static __forceinline void scatter(const vboolf4& mask, void* ptr, const vint4& index, const vfloat4& v)
    {
#if defined(__AVX512VL__)
      _mm_mask_i32scatter_ps((float*)ptr, mask, index, v, scale);
#else
      if (likely(mask[0])) *(float*)(((char*)ptr)+scale*index[0]) = v[0];
      if (likely(mask[1])) *(float*)(((char*)ptr)+scale*index[1]) = v[1];
      if (likely(mask[2])) *(float*)(((char*)ptr)+scale*index[2]) = v[2];
      if (likely(mask[3])) *(float*)(((char*)ptr)+scale*index[3]) = v[3];
#endif
    }

    static __forceinline void store(const vboolf4& mask, char* ptr, const vint4& ofs, const vfloat4& v) {
      scatter<1>(mask,ptr,ofs,v);
    }
    static __forceinline void store(const vboolf4& mask, float* ptr, const vint4& ofs, const vfloat4& v) {
      scatter<4>(mask,ptr,ofs,v);
    }

    ////////////////////////////////////////////////////////////////////////////////
    /// Array Access
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline const float& operator [](size_t index) const { assert(index < 4); return f[index]; }
    __forceinline       float& operator [](size_t index)       { assert(index < 4); return f[index]; }

    friend __forceinline vfloat4 select(const vboolf4& m, const vfloat4& t, const vfloat4& f) {
#if defined(__AVX512VL__)
      return _mm_mask_blend_ps(m, f, t);
#elif defined(__SSE4_1__)
      return _mm_blendv_ps(f, t, m);
#else
      return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
#endif
    }
  };

  ////////////////////////////////////////////////////////////////////////////////
  /// Load/Store
  ////////////////////////////////////////////////////////////////////////////////

  template<> struct mem<vfloat4>
  {
    static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return vfloat4::load (mask,ptr); }
    static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return vfloat4::loadu(mask,ptr); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vfloat4& v) { vfloat4::store (mask,ptr,v); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vfloat4& v) { vfloat4::storeu(mask,ptr,v); }
  };

  ////////////////////////////////////////////////////////////////////////////////
  /// Unary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vfloat4 asFloat(const vint4&   a) { return _mm_castsi128_ps(a); }
  __forceinline vint4   asInt  (const vfloat4& a) { return _mm_castps_si128(a); }
  __forceinline vuint4  asUInt (const vfloat4& a) { return _mm_castps_si128(a); }

  __forceinline vint4   toInt  (const vfloat4& a) { return vint4(a); }
  __forceinline vfloat4 toFloat(const vint4&   a) { return vfloat4(a); }

  __forceinline vfloat4 operator +(const vfloat4& a) { return a; }
  __forceinline vfloat4 operator -(const vfloat4& a) { return _mm_xor_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x80000000))); }

  __forceinline vfloat4 abs(const vfloat4& a) { return _mm_and_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff))); }
#if defined(__AVX512VL__)
  __forceinline vfloat4 sign(const vfloat4& a) { return _mm_mask_blend_ps(_mm_cmp_ps_mask(a, vfloat4(zero), _CMP_LT_OQ), vfloat4(one), -vfloat4(one)); }
#else
  __forceinline vfloat4 sign(const vfloat4& a) { return blendv_ps(vfloat4(one), -vfloat4(one), _mm_cmplt_ps(a, vfloat4(zero))); }
#endif
  __forceinline vfloat4 signmsk(const vfloat4& a) { return _mm_and_ps(a,_mm_castsi128_ps(_mm_set1_epi32(0x80000000))); }

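  /* Reciprocal: refine the low-precision hardware estimate with one
     Newton-Raphson step, r' = r + r*(1 - a*r), which roughly doubles the
     number of accurate bits. */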
  __forceinline vfloat4 rcp(const vfloat4& a)
  {
#if defined(__AVX512VL__)
    const vfloat4 r = _mm_rcp14_ps(a);
#else
    const vfloat4 r = _mm_rcp_ps(a);
#endif

#if defined(__AVX2__)
    return _mm_fmadd_ps(r, _mm_fnmadd_ps(a, r, vfloat4(1.0f)), r);                    // computes r + r * (1 - a * r)
#else
    return _mm_add_ps(r,_mm_mul_ps(r, _mm_sub_ps(vfloat4(1.0f), _mm_mul_ps(a, r))));  // computes r + r * (1 - a * r)
#endif
  }
  __forceinline vfloat4 sqr (const vfloat4& a) { return _mm_mul_ps(a,a); }
  __forceinline vfloat4 sqrt(const vfloat4& a) { return _mm_sqrt_ps(a); }

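  /* Reciprocal square root: refine the hardware estimate with Newton-Raphson
     steps of the form r' = 1.5*r - 0.5*a*r^3 (two iterations on NEON, one
     elsewhere). */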
  __forceinline vfloat4 rsqrt(const vfloat4& a)
  {
#if defined(__AVX512VL__)
    vfloat4 r = _mm_rsqrt14_ps(a);
#else
    vfloat4 r = _mm_rsqrt_ps(a);
#endif

#if defined(__ARM_NEON)
    r = _mm_fmadd_ps(_mm_set1_ps(1.5f), r, _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
    r = _mm_fmadd_ps(_mm_set1_ps(1.5f), r, _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
#elif defined(__AVX2__)
    r = _mm_fmadd_ps(_mm_set1_ps(1.5f), r, _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
#else
    r = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f), r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r)));
#endif
    return r;
  }

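  /* NaN test without floating-point compares: clear the sign bit and check,
     as integers, whether the remaining bits exceed 0x7f800000 (the bit
     pattern of +infinity). */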
  __forceinline vboolf4 isnan(const vfloat4& a) {
    const vfloat4 b = _mm_and_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff)));
#if defined(__AVX512VL__)
    return _mm_cmp_epi32_mask(_mm_castps_si128(b), _mm_set1_epi32(0x7f800000), _MM_CMPINT_GT);
#else
    return _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_castps_si128(b), _mm_set1_epi32(0x7f800000)));
#endif
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Binary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vfloat4 operator +(const vfloat4& a, const vfloat4& b) { return _mm_add_ps(a, b); }
  __forceinline vfloat4 operator +(const vfloat4& a, float          b) { return a + vfloat4(b); }
  __forceinline vfloat4 operator +(float          a, const vfloat4& b) { return vfloat4(a) + b; }

  __forceinline vfloat4 operator -(const vfloat4& a, const vfloat4& b) { return _mm_sub_ps(a, b); }
  __forceinline vfloat4 operator -(const vfloat4& a, float          b) { return a - vfloat4(b); }
  __forceinline vfloat4 operator -(float          a, const vfloat4& b) { return vfloat4(a) - b; }

  __forceinline vfloat4 operator *(const vfloat4& a, const vfloat4& b) { return _mm_mul_ps(a, b); }
  __forceinline vfloat4 operator *(const vfloat4& a, float          b) { return a * vfloat4(b); }
  __forceinline vfloat4 operator *(float          a, const vfloat4& b) { return vfloat4(a) * b; }

  __forceinline vfloat4 operator /(const vfloat4& a, const vfloat4& b) { return _mm_div_ps(a,b); }
  __forceinline vfloat4 operator /(const vfloat4& a, float          b) { return a/vfloat4(b); }
  __forceinline vfloat4 operator /(float          a, const vfloat4& b) { return vfloat4(a)/b; }

  __forceinline vfloat4 operator &(const vfloat4& a, const vfloat4& b) { return _mm_and_ps(a,b); }
  __forceinline vfloat4 operator |(const vfloat4& a, const vfloat4& b) { return _mm_or_ps(a,b); }
  __forceinline vfloat4 operator ^(const vfloat4& a, const vfloat4& b) { return _mm_xor_ps(a,b); }
  __forceinline vfloat4 operator ^(const vfloat4& a, const vint4&   b) { return _mm_xor_ps(a,_mm_castsi128_ps(b)); }

  __forceinline vfloat4 min(const vfloat4& a, const vfloat4& b) { return _mm_min_ps(a,b); }
  __forceinline vfloat4 min(const vfloat4& a, float          b) { return _mm_min_ps(a,vfloat4(b)); }
  __forceinline vfloat4 min(float          a, const vfloat4& b) { return _mm_min_ps(vfloat4(a),b); }

  __forceinline vfloat4 max(const vfloat4& a, const vfloat4& b) { return _mm_max_ps(a,b); }
  __forceinline vfloat4 max(const vfloat4& a, float          b) { return _mm_max_ps(a,vfloat4(b)); }
  __forceinline vfloat4 max(float          a, const vfloat4& b) { return _mm_max_ps(vfloat4(a),b); }

#if defined(__SSE4_1__)
    __forceinline vfloat4 mini(const vfloat4& a, const vfloat4& b) {
      const vint4 ai = _mm_castps_si128(a);
      const vint4 bi = _mm_castps_si128(b);
      const vint4 ci = _mm_min_epi32(ai,bi);
      return _mm_castsi128_ps(ci);
    }

    __forceinline vfloat4 maxi(const vfloat4& a, const vfloat4& b) {
      const vint4 ai = _mm_castps_si128(a);
      const vint4 bi = _mm_castps_si128(b);
      const vint4 ci = _mm_max_epi32(ai,bi);
      return _mm_castsi128_ps(ci);
    }

    __forceinline vfloat4 minui(const vfloat4& a, const vfloat4& b) {
      const vint4 ai = _mm_castps_si128(a);
      const vint4 bi = _mm_castps_si128(b);
      const vint4 ci = _mm_min_epu32(ai,bi);
      return _mm_castsi128_ps(ci);
    }

    __forceinline vfloat4 maxui(const vfloat4& a, const vfloat4& b) {
      const vint4 ai = _mm_castps_si128(a);
      const vint4 bi = _mm_castps_si128(b);
      const vint4 ci = _mm_max_epu32(ai,bi);
      return _mm_castsi128_ps(ci);
    }
#else
    __forceinline vfloat4 mini(const vfloat4& a, const vfloat4& b) {
      return min(a,b);
    }

    __forceinline vfloat4 maxi(const vfloat4& a, const vfloat4& b) {
      return max(a,b);
    }
#endif

  ////////////////////////////////////////////////////////////////////////////////
  /// Ternary Operators
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__AVX2__) || defined(__ARM_NEON)
  __forceinline vfloat4 madd (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fmadd_ps(a,b,c); }
  __forceinline vfloat4 msub (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fmsub_ps(a,b,c); }
  __forceinline vfloat4 nmadd(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fnmadd_ps(a,b,c); }
  __forceinline vfloat4 nmsub(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fnmsub_ps(a,b,c); }
#else
  __forceinline vfloat4 madd (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return a*b+c; }
  __forceinline vfloat4 msub (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return a*b-c; }
  __forceinline vfloat4 nmadd(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return -a*b+c; }
  __forceinline vfloat4 nmsub(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return -a*b-c; }
#endif

  ////////////////////////////////////////////////////////////////////////////////
  /// Assignment Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vfloat4& operator +=(vfloat4& a, const vfloat4& b) { return a = a + b; }
  __forceinline vfloat4& operator +=(vfloat4& a, float          b) { return a = a + b; }

  __forceinline vfloat4& operator -=(vfloat4& a, const vfloat4& b) { return a = a - b; }
  __forceinline vfloat4& operator -=(vfloat4& a, float          b) { return a = a - b; }

  __forceinline vfloat4& operator *=(vfloat4& a, const vfloat4& b) { return a = a * b; }
  __forceinline vfloat4& operator *=(vfloat4& a, float          b) { return a = a * b; }

  __forceinline vfloat4& operator /=(vfloat4& a, const vfloat4& b) { return a = a / b; }
  __forceinline vfloat4& operator /=(vfloat4& a, float          b) { return a = a / b; }

  ////////////////////////////////////////////////////////////////////////////////
  /// Comparison Operators + Select
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__AVX512VL__)
  __forceinline vboolf4 operator ==(const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_EQ); }
  __forceinline vboolf4 operator !=(const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_NE); }
  __forceinline vboolf4 operator < (const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_LT); }
  __forceinline vboolf4 operator >=(const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_GE); }
  __forceinline vboolf4 operator > (const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_GT); }
  __forceinline vboolf4 operator <=(const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_LE); }
#else
  __forceinline vboolf4 operator ==(const vfloat4& a, const vfloat4& b) { return _mm_cmpeq_ps (a, b); }
  __forceinline vboolf4 operator !=(const vfloat4& a, const vfloat4& b) { return _mm_cmpneq_ps(a, b); }
  __forceinline vboolf4 operator < (const vfloat4& a, const vfloat4& b) { return _mm_cmplt_ps (a, b); }
  __forceinline vboolf4 operator >=(const vfloat4& a, const vfloat4& b) { return _mm_cmpnlt_ps(a, b); }
  __forceinline vboolf4 operator > (const vfloat4& a, const vfloat4& b) { return _mm_cmpnle_ps(a, b); }
  __forceinline vboolf4 operator <=(const vfloat4& a, const vfloat4& b) { return _mm_cmple_ps (a, b); }
#endif

  __forceinline vboolf4 operator ==(const vfloat4& a, float          b) { return a == vfloat4(b); }
  __forceinline vboolf4 operator ==(float          a, const vfloat4& b) { return vfloat4(a) == b; }

  __forceinline vboolf4 operator !=(const vfloat4& a, float          b) { return a != vfloat4(b); }
  __forceinline vboolf4 operator !=(float          a, const vfloat4& b) { return vfloat4(a) != b; }

  __forceinline vboolf4 operator < (const vfloat4& a, float          b) { return a <  vfloat4(b); }
  __forceinline vboolf4 operator < (float          a, const vfloat4& b) { return vfloat4(a) <  b; }

  __forceinline vboolf4 operator >=(const vfloat4& a, float          b) { return a >= vfloat4(b); }
  __forceinline vboolf4 operator >=(float          a, const vfloat4& b) { return vfloat4(a) >= b; }

  __forceinline vboolf4 operator > (const vfloat4& a, float          b) { return a >  vfloat4(b); }
  __forceinline vboolf4 operator > (float          a, const vfloat4& b) { return vfloat4(a) >  b; }

  __forceinline vboolf4 operator <=(const vfloat4& a, float          b) { return a <= vfloat4(b); }
  __forceinline vboolf4 operator <=(float          a, const vfloat4& b) { return vfloat4(a) <= b; }

  __forceinline vboolf4 eq(const vfloat4& a, const vfloat4& b) { return a == b; }
  __forceinline vboolf4 ne(const vfloat4& a, const vfloat4& b) { return a != b; }
  __forceinline vboolf4 lt(const vfloat4& a, const vfloat4& b) { return a <  b; }
  __forceinline vboolf4 ge(const vfloat4& a, const vfloat4& b) { return a >= b; }
  __forceinline vboolf4 gt(const vfloat4& a, const vfloat4& b) { return a >  b; }
  __forceinline vboolf4 le(const vfloat4& a, const vfloat4& b) { return a <= b; }

#if defined(__AVX512VL__)
  __forceinline vboolf4 eq(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_EQ); }
  __forceinline vboolf4 ne(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_NE); }
  __forceinline vboolf4 lt(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_LT); }
  __forceinline vboolf4 ge(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_GE); }
  __forceinline vboolf4 gt(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_GT); }
  __forceinline vboolf4 le(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_LE); }
#else
  __forceinline vboolf4 eq(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a == b); }
  __forceinline vboolf4 ne(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a != b); }
  __forceinline vboolf4 lt(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a <  b); }
  __forceinline vboolf4 ge(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a >= b); }
  __forceinline vboolf4 gt(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a >  b); }
  __forceinline vboolf4 le(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a <= b); }
#endif

  template<int mask>
    __forceinline vfloat4 select(const vfloat4& t, const vfloat4& f)
  {
#if defined(__SSE4_1__)
    return _mm_blend_ps(f, t, mask);
#else
    return select(vboolf4(mask), t, f);
#endif
  }

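  /* Linear interpolation computed as a + t*(b-a), a single madd. */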
  __forceinline vfloat4 lerp(const vfloat4& a, const vfloat4& b, const vfloat4& t) {
    return madd(t,b-a,a);
  }

  __forceinline bool isvalid(const vfloat4& v) {
    return all((v > vfloat4(-FLT_LARGE)) & (v < vfloat4(+FLT_LARGE)));
  }

  __forceinline bool is_finite(const vfloat4& a) {
    return all((a >= vfloat4(-FLT_MAX)) & (a <= vfloat4(+FLT_MAX)));
  }

  __forceinline bool is_finite(const vboolf4& valid, const vfloat4& a) {
    return all(valid, (a >= vfloat4(-FLT_MAX)) & (a <= vfloat4(+FLT_MAX)));
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Rounding Functions
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__aarch64__)
  __forceinline vfloat4 floor(const vfloat4& a) { return vrndmq_f32(a.v); }
  __forceinline vfloat4 ceil (const vfloat4& a) { return vrndpq_f32(a.v); }
  __forceinline vfloat4 trunc(const vfloat4& a) { return vrndq_f32(a.v); }
  __forceinline vfloat4 round(const vfloat4& a) { return vrndnq_f32(a.v); }
#elif defined (__SSE4_1__)
  __forceinline vfloat4 floor(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF    ); }
  __forceinline vfloat4 ceil (const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_POS_INF    ); }
  __forceinline vfloat4 trunc(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_ZERO       ); }
  __forceinline vfloat4 round(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT); }
#else
  __forceinline vfloat4 floor(const vfloat4& a) { return vfloat4(floorf(a[0]),floorf(a[1]),floorf(a[2]),floorf(a[3])); }
  __forceinline vfloat4 ceil (const vfloat4& a) { return vfloat4(ceilf (a[0]),ceilf (a[1]),ceilf (a[2]),ceilf (a[3])); }
  __forceinline vfloat4 trunc(const vfloat4& a) { return vfloat4(truncf(a[0]),truncf(a[1]),truncf(a[2]),truncf(a[3])); }
  __forceinline vfloat4 round(const vfloat4& a) { return vfloat4(roundf(a[0]),roundf(a[1]),roundf(a[2]),roundf(a[3])); }
#endif
  __forceinline vfloat4 frac(const vfloat4& a) { return a-floor(a); }

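  /* The fallback relies on the default round-to-nearest conversion of the
     vint4 constructor: converting a-0.5f approximates floor(a), though it can
     be off by one where a-0.5f lands exactly halfway between integers. */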
  __forceinline vint4 floori(const vfloat4& a) {
#if defined(__SSE4_1__)
    return vint4(floor(a));
#else
    return vint4(a-vfloat4(0.5f));
#endif
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Movement/Shifting/Shuffling Functions
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vfloat4 unpacklo(const vfloat4& a, const vfloat4& b) { return _mm_unpacklo_ps(a, b); }
  __forceinline vfloat4 unpackhi(const vfloat4& a, const vfloat4& b) { return _mm_unpackhi_ps(a, b); }

  template<int i0, int i1, int i2, int i3>
  __forceinline vfloat4 shuffle(const vfloat4& v) {
    return _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v), _MM_SHUFFLE(i3, i2, i1, i0)));
  }

  template<int i0, int i1, int i2, int i3>
  __forceinline vfloat4 shuffle(const vfloat4& a, const vfloat4& b) {
    return _mm_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0));
  }

#if defined(__SSE3__)
  template<> __forceinline vfloat4 shuffle<0, 0, 2, 2>(const vfloat4& v) { return _mm_moveldup_ps(v); }
  template<> __forceinline vfloat4 shuffle<1, 1, 3, 3>(const vfloat4& v) { return _mm_movehdup_ps(v); }
  template<> __forceinline vfloat4 shuffle<0, 1, 0, 1>(const vfloat4& v) { return _mm_castpd_ps(_mm_movedup_pd(_mm_castps_pd(v))); }
#endif

  template<int i>
  __forceinline vfloat4 shuffle(const vfloat4& v) {
    return shuffle<i,i,i,i>(v);
  }

  template<int i> __forceinline float extract   (const vfloat4& a) { return _mm_cvtss_f32(shuffle<i>(a)); }
  template<>      __forceinline float extract<0>(const vfloat4& a) { return _mm_cvtss_f32(a); }

#if defined (__SSE4_1__)
  template<int dst, int src, int clr> __forceinline vfloat4 insert(const vfloat4& a, const vfloat4& b) { return _mm_insert_ps(a, b, (dst << 4) | (src << 6) | clr); }
  template<int dst, int src> __forceinline vfloat4 insert(const vfloat4& a, const vfloat4& b) { return insert<dst, src, 0>(a, b); }
  template<int dst> __forceinline vfloat4 insert(const vfloat4& a, const float b) { return insert<dst, 0>(a, _mm_set_ss(b)); }
#else
  template<int dst, int src> __forceinline vfloat4 insert(const vfloat4& a, const vfloat4& b) { vfloat4 c = a; c[dst&3] = b[src&3]; return c; }
  template<int dst> __forceinline vfloat4 insert(const vfloat4& a, float b) { vfloat4 c = a; c[dst&3] = b; return c; }
#endif

  __forceinline float toScalar(const vfloat4& v) { return _mm_cvtss_f32(v); }

  __forceinline vfloat4 shift_right_1(const vfloat4& x) {
    return _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(x), 4));
  }

#if defined (__AVX2__)
  __forceinline vfloat4 permute(const vfloat4 &a, const __m128i &index) {
    return _mm_permutevar_ps(a,index);
  }

  __forceinline vfloat4 broadcast1f(const void* a) { return _mm_broadcast_ss((float*)a); }

#endif

#if defined(__AVX512VL__)
  template<int i>
  __forceinline vfloat4 align_shift_right(const vfloat4& a, const vfloat4& b) {
    return _mm_castsi128_ps(_mm_alignr_epi32(_mm_castps_si128(a), _mm_castps_si128(b), i));
  }
#endif

  ////////////////////////////////////////////////////////////////////////////////
  /// Sorting Network
  ////////////////////////////////////////////////////////////////////////////////

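  /* Sort four lanes with a three-stage compare-exchange network: each stage
     pairs lanes via a shuffle, computes min and max of the pairs, and merges
     the two halves with a compile-time select mask. */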
  __forceinline vfloat4 sort_ascending(const vfloat4& v)
  {
    const vfloat4 a0 = v;
    const vfloat4 b0 = shuffle<1,0,3,2>(a0);
    const vfloat4 c0 = min(a0,b0);
    const vfloat4 d0 = max(a0,b0);
    const vfloat4 a1 = select<0x5 /* 0b0101 */>(c0,d0);
    const vfloat4 b1 = shuffle<2,3,0,1>(a1);
    const vfloat4 c1 = min(a1,b1);
    const vfloat4 d1 = max(a1,b1);
    const vfloat4 a2 = select<0x3 /* 0b0011 */>(c1,d1);
    const vfloat4 b2 = shuffle<0,2,1,3>(a2);
    const vfloat4 c2 = min(a2,b2);
    const vfloat4 d2 = max(a2,b2);
    const vfloat4 a3 = select<0x2 /* 0b0010 */>(c2,d2);
    return a3;
  }

  __forceinline vfloat4 sort_descending(const vfloat4& v)
  {
    const vfloat4 a0 = v;
    const vfloat4 b0 = shuffle<1,0,3,2>(a0);
    const vfloat4 c0 = max(a0,b0);
    const vfloat4 d0 = min(a0,b0);
    const vfloat4 a1 = select<0x5 /* 0b0101 */>(c0,d0);
    const vfloat4 b1 = shuffle<2,3,0,1>(a1);
    const vfloat4 c1 = max(a1,b1);
    const vfloat4 d1 = min(a1,b1);
    const vfloat4 a2 = select<0x3 /* 0b0011 */>(c1,d1);
    const vfloat4 b2 = shuffle<0,2,1,3>(a2);
    const vfloat4 c2 = max(a2,b2);
    const vfloat4 d2 = min(a2,b2);
    const vfloat4 a3 = select<0x2 /* 0b0010 */>(c2,d2);
    return a3;
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Transpose
  ////////////////////////////////////////////////////////////////////////////////

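  /* 4x4 transpose in two interleave stages: unpacklo/unpackhi merge the row
     pairs (r0,r2) and (r1,r3), then interleaving those intermediates yields
     the columns. */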
  __forceinline void transpose(const vfloat4& r0, const vfloat4& r1, const vfloat4& r2, const vfloat4& r3, vfloat4& c0, vfloat4& c1, vfloat4& c2, vfloat4& c3)
  {
    vfloat4 l02 = unpacklo(r0,r2);
    vfloat4 h02 = unpackhi(r0,r2);
    vfloat4 l13 = unpacklo(r1,r3);
    vfloat4 h13 = unpackhi(r1,r3);
    c0 = unpacklo(l02,l13);
    c1 = unpackhi(l02,l13);
    c2 = unpacklo(h02,h13);
    c3 = unpackhi(h02,h13);
  }

  __forceinline void transpose(const vfloat4& r0, const vfloat4& r1, const vfloat4& r2, const vfloat4& r3, vfloat4& c0, vfloat4& c1, vfloat4& c2)
  {
    vfloat4 l02 = unpacklo(r0,r2);
    vfloat4 h02 = unpackhi(r0,r2);
    vfloat4 l13 = unpacklo(r1,r3);
    vfloat4 h13 = unpackhi(r1,r3);
    c0 = unpacklo(l02,l13);
    c1 = unpackhi(l02,l13);
    c2 = unpacklo(h02,h13);
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Reductions
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vfloat4 vreduce_min(const vfloat4& v) { vfloat4 h = min(shuffle<1,0,3,2>(v),v); return min(shuffle<2,3,0,1>(h),h); }
  __forceinline vfloat4 vreduce_max(const vfloat4& v) { vfloat4 h = max(shuffle<1,0,3,2>(v),v); return max(shuffle<2,3,0,1>(h),h); }
  __forceinline vfloat4 vreduce_add(const vfloat4& v) { vfloat4 h = shuffle<1,0,3,2>(v)   + v ; return shuffle<2,3,0,1>(h)   + h ; }

  __forceinline float reduce_min(const vfloat4& v) { return _mm_cvtss_f32(vreduce_min(v)); }
  __forceinline float reduce_max(const vfloat4& v) { return _mm_cvtss_f32(vreduce_max(v)); }
  __forceinline float reduce_add(const vfloat4& v) { return _mm_cvtss_f32(vreduce_add(v)); }

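  /* Index of the smallest (largest) active lane: inactive lanes are replaced
     by +inf (-inf), lanes equal to the broadcast horizontal reduction are
     flagged, and bsf returns the first such lane (assuming at least one lane
     is active). */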
  __forceinline size_t select_min(const vboolf4& valid, const vfloat4& v)
  {
    const vfloat4 a = select(valid,v,vfloat4(pos_inf));
    const vbool4 valid_min = valid & (a == vreduce_min(a));
    return bsf(movemask(any(valid_min) ? valid_min : valid));
  }
  __forceinline size_t select_max(const vboolf4& valid, const vfloat4& v)
  {
    const vfloat4 a = select(valid,v,vfloat4(neg_inf));
    const vbool4 valid_max = valid & (a == vreduce_max(a));
    return bsf(movemask(any(valid_max) ? valid_max : valid));
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Euclidean Space Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline float dot(const vfloat4& a, const vfloat4& b) {
    return reduce_add(a*b);
  }

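  /* Cross product via the lane rotation r = shuffle<1,2,0,3>: each lane of
     a*r(b) - r(a)*b holds one cross term, and rotating that result once more
     moves every term into its proper lane (lane 3 evaluates to
     a[3]*b[3] - a[3]*b[3]). */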
  __forceinline vfloat4 cross(const vfloat4& a, const vfloat4& b)
  {
    const vfloat4 a0 = a;
    const vfloat4 b0 = shuffle<1,2,0,3>(b);
    const vfloat4 a1 = shuffle<1,2,0,3>(a);
    const vfloat4 b1 = b;
    return shuffle<1,2,0,3>(msub(a0,b0,a1*b1));
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// Output Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline embree_ostream operator <<(embree_ostream cout, const vfloat4& a) {
    return cout << "<"  << a[0] << ", "  << a[1] << ", "  << a[2] << ", "  << a[3] << ">";
  }

}

#undef vboolf
#undef vboold
#undef vint
#undef vuint
#undef vllong
#undef vfloat
#undef vdouble