// Copyright (C) 2021 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#ifndef QNUMERIC_H
#define QNUMERIC_H

#if 0
#pragma qt_class(QtNumeric)
#endif

#include <QtCore/qtconfigmacros.h>
#include <QtCore/qtcoreexports.h>
#include <QtCore/qtypes.h>

#include <cmath>
#include <limits>
#include <type_traits>

// min() and max() may be #defined by windows.h if that is included before, but we need them
// for std::numeric_limits below. You should not use the min() and max() macros, so we just #undef.
#ifdef min
#  undef min
#  undef max
#endif

//
// SIMDe (SIMD Everywhere) can't be used if intrin.h has been included first, as many
// definitions conflict. Defining Q_NUMERIC_NO_INTRINSICS allows SIMDe users to use Qt,
// at the cost of falling back to the generic implementations of qMulOverflow and
// qAddOverflow below.
//
#if defined(Q_CC_MSVC) && !defined(Q_NUMERIC_NO_INTRINSICS)
#  include <intrin.h>
#  include <float.h>
#  if defined(Q_PROCESSOR_X86) || defined(Q_PROCESSOR_X86_64)
#    define Q_HAVE_ADDCARRY
#  endif
#  if defined(Q_PROCESSOR_X86_64) || defined(Q_PROCESSOR_ARM_64)
#    define Q_INTRINSIC_MUL_OVERFLOW64
#    define Q_UMULH(v1, v2) __umulh(v1, v2)
#    define Q_SMULH(v1, v2) __mulh(v1, v2)
#    pragma intrinsic(__umulh)
#    pragma intrinsic(__mulh)
#  endif
#endif

#if defined(Q_OS_INTEGRITY) && defined(Q_PROCESSOR_ARM_64)
#  include <arm64_ghs.h>
#  define Q_INTRINSIC_MUL_OVERFLOW64
#  define Q_UMULH(v1, v2) __MULUH64(v1, v2)
#  define Q_SMULH(v1, v2) __MULSH64(v1, v2)
#endif

QT_BEGIN_NAMESPACE

// To match std::is{inf,nan,finite} functions:
template <typename T>
constexpr typename std::enable_if<std::is_integral<T>::value, bool>::type
qIsInf(T) { return false; }
template <typename T>
constexpr typename std::enable_if<std::is_integral<T>::value, bool>::type
qIsNaN(T) { return false; }
template <typename T>
constexpr typename std::enable_if<std::is_integral<T>::value, bool>::type
qIsFinite(T) { return true; }

// Floating-point types (see qfloat16.h for its overloads).
Q_CORE_EXPORT Q_DECL_CONST_FUNCTION bool qIsInf(double d);
Q_CORE_EXPORT Q_DECL_CONST_FUNCTION bool qIsNaN(double d);
Q_CORE_EXPORT Q_DECL_CONST_FUNCTION bool qIsFinite(double d);
Q_CORE_EXPORT Q_DECL_CONST_FUNCTION int qFpClassify(double val);
Q_CORE_EXPORT Q_DECL_CONST_FUNCTION bool qIsInf(float f);
Q_CORE_EXPORT Q_DECL_CONST_FUNCTION bool qIsNaN(float f);
Q_CORE_EXPORT Q_DECL_CONST_FUNCTION bool qIsFinite(float f);
Q_CORE_EXPORT Q_DECL_CONST_FUNCTION int qFpClassify(float val);
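
// A minimal usage sketch (illustrative only): the integral overloads let
// generic code apply these classifiers to any arithmetic type without
// special-casing integers.
//
//     qIsNaN(std::nan(""));   // true
//     qIsInf(1.0f / 0.0f);    // true
//     qIsFinite(42);          // true: integers are always finite
//     qFpClassify(0.0);       // FP_ZERO, mirroring std::fpclassify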

#if QT_CONFIG(signaling_nan)
Q_CORE_EXPORT Q_DECL_CONST_FUNCTION double qSNaN();
#endif
Q_CORE_EXPORT Q_DECL_CONST_FUNCTION double qQNaN();
Q_CORE_EXPORT Q_DECL_CONST_FUNCTION double qInf();

Q_CORE_EXPORT quint32 qFloatDistance(float a, float b);
Q_CORE_EXPORT quint64 qFloatDistance(double a, double b);
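
// qFloatDistance(a, b) returns how many representable floating-point values
// lie between a and b, i.e. their distance in ULPs (a sketch; the exact
// values assume IEEE 754 binary32/binary64):
//
//     qFloatDistance(1.0f, 1.0f);                          // 0
//     qFloatDistance(1.0f, std::nextafter(1.0f, 2.0f));    // 1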

#define Q_INFINITY (QT_PREPEND_NAMESPACE(qInf)())
#if QT_CONFIG(signaling_nan)
#  define Q_SNAN (QT_PREPEND_NAMESPACE(qSNaN)())
#endif
#define Q_QNAN (QT_PREPEND_NAMESPACE(qQNaN)())

// Overflow math.
// This provides efficient implementations for int, unsigned, qsizetype and
// size_t. Implementations for 8- and 16-bit types will work but may not be as
// efficient. Implementations for 64-bit may be missing on 32-bit platforms.
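
// Common shape of the API (a usage sketch; handleOverflow() and use() are
// hypothetical caller code, not part of Qt): each function computes
// v1 <op> v2 into *r and returns true exactly when the mathematical result
// does not fit in T.
//
//     int sum;
//     if (qAddOverflow(x, y, &sum))
//         return handleOverflow();
//     use(sum);   // on overflow, *r still holds the wrapped result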

#if (Q_CC_GNU >= 500 || __has_builtin(__builtin_add_overflow)) \
    && !(QT_POINTER_SIZE == 4 && defined(Q_CC_CLANG))
// GCC 5 and Clang 3.8 have builtins to detect overflows.
// 32-bit Clang has the builtins too, but emits calls into a compiler runtime
// library that isn't always available, so it takes the generic path below.
#define Q_INTRINSIC_MUL_OVERFLOW64

template <typename T> inline
typename std::enable_if_t<std::is_unsigned_v<T> || std::is_signed_v<T>, bool>
qAddOverflow(T v1, T v2, T *r)
{ return __builtin_add_overflow(v1, v2, r); }

template <typename T> inline
typename std::enable_if_t<std::is_unsigned_v<T> || std::is_signed_v<T>, bool>
qSubOverflow(T v1, T v2, T *r)
{ return __builtin_sub_overflow(v1, v2, r); }

template <typename T> inline
typename std::enable_if_t<std::is_unsigned_v<T> || std::is_signed_v<T>, bool>
qMulOverflow(T v1, T v2, T *r)
{ return __builtin_mul_overflow(v1, v2, r); }

#else
// Generic implementations

template <typename T> inline typename std::enable_if_t<std::is_unsigned_v<T>, bool>
qAddOverflow(T v1, T v2, T *r)
{
    // unsigned additions are well-defined
    *r = v1 + v2;
    return v1 > T(v1 + v2);
}

template <typename T> inline typename std::enable_if_t<std::is_signed_v<T>, bool>
qAddOverflow(T v1, T v2, T *r)
{
    // Here's how we calculate the overflow:
    // 1) unsigned addition is well-defined, so we can always execute it
    // 2) conversion from unsigned back to signed is implementation-
    //    defined and in the implementations we use, it's a no-op.
    // 3) signed integer overflow happens if the sign of the two input operands
    //    is the same but the sign of the result is different. In other words,
    //    the sign of the result must be the same as the sign of either
    //    operand.

    using U = typename std::make_unsigned_t<T>;
    *r = T(U(v1) + U(v2));

    // If int is two's complement, assume all integer types are too.
    if (std::is_same_v<int32_t, int>) {
        // Two's complement equivalent (generates slightly shorter code):
        //  x ^ y             is negative if x and y have different signs
        //  x & y             is negative if x and y are negative
        // (x ^ z) & (y ^ z)  is negative if x and z have different signs
        //                    AND y and z have different signs
        return ((v1 ^ *r) & (v2 ^ *r)) < 0;
    }

    bool s1 = (v1 < 0);
    bool s2 = (v2 < 0);
    bool sr = (*r < 0);
    return s1 != sr && s2 != sr;
    // also: return s1 == s2 && s1 != sr;
}
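
// Worked example of the sign trick above (illustrative, assuming 8-bit
// two's complement): v1 = 100, v2 = 100 wraps to *r = -56.
//  v1 ^ *r  and  v2 ^ *r  are both negative (each operand's sign differs
//  from the result's), so their AND is negative and overflow is reported.
// With v2 = -100 instead, *r = 0; the two XORs then have opposite signs,
// their AND is non-negative, and no overflow is reported.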

template <typename T> inline typename std::enable_if_t<std::is_unsigned_v<T>, bool>
qSubOverflow(T v1, T v2, T *r)
{
    // unsigned subtractions are well-defined
    *r = v1 - v2;
    return v1 < v2;
}

template <typename T> inline typename std::enable_if_t<std::is_signed_v<T>, bool>
qSubOverflow(T v1, T v2, T *r)
{
    // See above for explanation. This is the same with some signs reversed.
    // We can't use qAddOverflow(v1, -v2, r) because it would be UB if
    // v2 == std::numeric_limits<T>::min().

    using U = typename std::make_unsigned_t<T>;
    *r = T(U(v1) - U(v2));

    if (std::is_same_v<int32_t, int>)
        return ((v1 ^ *r) & (~v2 ^ *r)) < 0;

    bool s1 = (v1 < 0);
    bool s2 = !(v2 < 0);
    bool sr = (*r < 0);
    return s1 != sr && s2 != sr;
    // also: return s1 == s2 && s1 != sr;
}

template <typename T> inline
typename std::enable_if_t<std::is_unsigned_v<T> || std::is_signed_v<T>, bool>
qMulOverflow(T v1, T v2, T *r)
{
    // use the next biggest type
    // Note: for 64-bit systems where __int128 isn't supported, this will cause an error.
    using LargerInt = QIntegerForSize<sizeof(T) * 2>;
    using Larger = typename std::conditional_t<std::is_signed_v<T>,
            typename LargerInt::Signed, typename LargerInt::Unsigned>;
    Larger lr = Larger(v1) * Larger(v2);
    *r = T(lr);
    return lr > (std::numeric_limits<T>::max)() || lr < (std::numeric_limits<T>::min)();
}
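
// Sketch of the widening idea with concrete numbers (assumes the generic
// path above): for T = qint16, v1 = 300, v2 = 200 the qint32 product is
// 60000, which exceeds qint16's maximum of 32767, so overflow is reported,
// while *r still receives the truncated low half, qint16(60000) = -5536.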

#  if defined(Q_INTRINSIC_MUL_OVERFLOW64)
template <> inline bool qMulOverflow(quint64 v1, quint64 v2, quint64 *r)
{
    *r = v1 * v2;
    return Q_UMULH(v1, v2);
}
template <> inline bool qMulOverflow(qint64 v1, qint64 v2, qint64 *r)
{
    // This is slightly more complex than the unsigned case above: the sign bit
    // of 'low' must be replicated as the entire 'high', so the only valid
    // values for 'high' are 0 and -1. Use unsigned multiply since it's the same
    // as signed for the low bits and use a signed right shift to verify that
    // 'high' is nothing but sign bits that match the sign of 'low'.

    qint64 high = Q_SMULH(v1, v2);
    *r = qint64(quint64(v1) * quint64(v2));
    return (*r >> 63) != high;
}
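
// Illustration of the check above (a sketch; Q_SMULH yields the upper 64 bits
// of the signed 128-bit product): for v1 = v2 = 3000000000, the full product
// 9e18 is below 2^63, so 'high' is 0 and matches *r >> 63 (no overflow).
// Doubling either operand pushes the product past 2^63; the low half then
// goes negative while 'high' stays 0, and the mismatch reports overflow.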

#    if defined(Q_OS_INTEGRITY) && defined(Q_PROCESSOR_ARM_64)
template <> inline bool qMulOverflow(uint64_t v1, uint64_t v2, uint64_t *r)
{
    return qMulOverflow<quint64>(v1, v2, reinterpret_cast<quint64 *>(r));
}

template <> inline bool qMulOverflow(int64_t v1, int64_t v2, int64_t *r)
{
    return qMulOverflow<qint64>(v1, v2, reinterpret_cast<qint64 *>(r));
}
#    endif // OS_INTEGRITY ARM64
#  endif // Q_INTRINSIC_MUL_OVERFLOW64

#  if defined(Q_HAVE_ADDCARRY) && defined(Q_PROCESSOR_X86)
// We can use intrinsics for the unsigned operations with MSVC
template <> inline bool qAddOverflow(unsigned v1, unsigned v2, unsigned *r)
{ return _addcarry_u32(0, v1, v2, r); }

// 32-bit qMulOverflow is fine with the generic code above

template <> inline bool qAddOverflow(quint64 v1, quint64 v2, quint64 *r)
{
#    if defined(Q_PROCESSOR_X86_64)
    return _addcarry_u64(0, v1, v2, reinterpret_cast<unsigned __int64 *>(r));
#    else
    uint low, high;
    uchar carry = _addcarry_u32(0, unsigned(v1), unsigned(v2), &low);
    carry = _addcarry_u32(carry, v1 >> 32, v2 >> 32, &high);
    *r = (quint64(high) << 32) | low;
    return carry;
#    endif // !x86-64
}
#  endif // HAVE ADDCARRY
#endif // !GCC
// Q_HAVE_ADDCARRY may also be defined when the compiler-builtin branch above
// was taken, so clean it up outside the #else block:
#undef Q_HAVE_ADDCARRY

// Implementations for addition, subtraction or multiplication by a
// compile-time constant. For addition and subtraction, we simply call the code
// that detects overflow at runtime. For multiplication, we compare to the
// maximum possible values before multiplying to ensure no overflow happens.

template <typename T, T V2> bool qAddOverflow(T v1, std::integral_constant<T, V2>, T *r)
{
    return qAddOverflow(v1, V2, r);
}

template <auto V2, typename T> bool qAddOverflow(T v1, T *r)
{
    return qAddOverflow(v1, std::integral_constant<T, V2>{}, r);
}

template <typename T, T V2> bool qSubOverflow(T v1, std::integral_constant<T, V2>, T *r)
{
    return qSubOverflow(v1, V2, r);
}

template <auto V2, typename T> bool qSubOverflow(T v1, T *r)
{
    return qSubOverflow(v1, std::integral_constant<T, V2>{}, r);
}

template <typename T, T V2> bool qMulOverflow(T v1, std::integral_constant<T, V2>, T *r)
{
    // Runtime detection for anything smaller than or equal to a register
    // width, as most architectures' multiplication instructions actually
    // produce a result twice as wide as the input registers, allowing us to
    // efficiently detect the overflow.
    if constexpr (sizeof(T) <= sizeof(qregisteruint)) {
        return qMulOverflow(v1, V2, r);

#ifdef Q_INTRINSIC_MUL_OVERFLOW64
    } else if constexpr (sizeof(T) <= sizeof(quint64)) {
        // If we have intrinsics detecting overflow of 64-bit multiplications,
        // then detect overflows through them up to 64 bits.
        return qMulOverflow(v1, V2, r);
#endif

    } else if constexpr (V2 == 0 || V2 == 1) {
        // trivial cases (and simplify logic below due to division by zero)
        *r = v1 * V2;
        return false;
    } else if constexpr (V2 == -1) {
        // multiplication by -1 is valid *except* for signed minimum values
        // (necessary to avoid dividing min() by -1, which is an overflow)
        if (v1 < 0 && v1 == (std::numeric_limits<T>::min)())
            return true;
        *r = -v1;
        return false;
    } else {
        // For 64-bit multiplications on 32-bit platforms, let's instead compare v1
        // against the bounds that would overflow.
        constexpr T Highest = (std::numeric_limits<T>::max)() / V2;
        constexpr T Lowest = (std::numeric_limits<T>::min)() / V2;
        if constexpr (Highest > Lowest) {
            if (v1 > Highest || v1 < Lowest)
                return true;
        } else {
            // this can only happen if V2 < 0
            static_assert(V2 < 0);
            if (v1 > Lowest || v1 < Highest)
                return true;
        }

        *r = v1 * V2;
        return false;
    }
}

template <auto V2, typename T> bool qMulOverflow(T v1, T *r)
{
    if constexpr (V2 == 2)
        return qAddOverflow(v1, v1, r);
    return qMulOverflow(v1, std::integral_constant<T, V2>{}, r);
}
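
// Usage sketch for the compile-time overloads (stringLength and bytes are
// illustrative names, not Qt API):
//
//     qsizetype bytes;
//     if (qMulOverflow<2>(stringLength, &bytes))   // factor known at compile time
//         return nullptr;
//
// Passing the constant as a template argument lets the bounds above fold into
// a single comparison, and multiplication by 2 becomes a self-addition.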

template <typename T>
constexpr inline T qAbs(const T &t) { return t >= 0 ? t : -t; }

namespace QtPrivate {
template <typename T,
          typename std::enable_if_t<std::is_integral_v<T>, bool> = true>
constexpr inline auto qUnsignedAbs(T t)
{
    using U = std::make_unsigned_t<T>;
    return (t >= 0) ? U(t) : U(~U(t) + U(1));
}
} // namespace QtPrivate
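
// Why the bit dance: qAbs(t) is undefined behaviour for the most negative
// value of a signed type, while qUnsignedAbs() stays well-defined (a sketch,
// assuming 32-bit int):
//
//     QtPrivate::qUnsignedAbs(-1);                               // 1u
//     QtPrivate::qUnsignedAbs(std::numeric_limits<int>::min()); // 2147483648u,
//                        // which does not fit in int, hence the unsigned result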

// gcc < 10 doesn't have __has_builtin
#if defined(Q_PROCESSOR_ARM_64) && (__has_builtin(__builtin_round) || defined(Q_CC_GNU)) && !defined(Q_CC_CLANG)
// ARM64 has a single instruction that can do C++ rounding with conversion to integer.
// Note current clang versions have non-constexpr __builtin_round, ### allow clang this path when they fix it.
constexpr inline int qRound(double d)
{ return int(__builtin_round(d)); }
constexpr inline int qRound(float f)
{ return int(__builtin_roundf(f)); }
constexpr inline qint64 qRound64(double d)
{ return qint64(__builtin_round(d)); }
constexpr inline qint64 qRound64(float f)
{ return qint64(__builtin_roundf(f)); }
#elif defined(__SSE2__) && (__has_builtin(__builtin_copysign) || defined(Q_CC_GNU))
// SSE has binary operations directly on floating point making copysign fast
constexpr inline int qRound(double d)
{ return int(d + __builtin_copysign(0.5, d)); }
constexpr inline int qRound(float f)
{ return int(f + __builtin_copysignf(0.5f, f)); }
constexpr inline qint64 qRound64(double d)
{ return qint64(d + __builtin_copysign(0.5, d)); }
constexpr inline qint64 qRound64(float f)
{ return qint64(f + __builtin_copysignf(0.5f, f)); }
#else
constexpr inline int qRound(double d)
{ return d >= 0.0 ? int(d + 0.5) : int(d - 0.5); }
constexpr inline int qRound(float f)
{ return f >= 0.0f ? int(f + 0.5f) : int(f - 0.5f); }

constexpr inline qint64 qRound64(double d)
{ return d >= 0.0 ? qint64(d + 0.5) : qint64(d - 0.5); }
constexpr inline qint64 qRound64(float f)
{ return f >= 0.0f ? qint64(f + 0.5f) : qint64(f - 0.5f); }
#endif
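
// All three paths implement round-half-away-from-zero, matching C's round()
// rather than IEEE round-half-to-even (illustrative values):
//
//     qRound(2.5);     // 3
//     qRound(-2.5);    // -3
//     qRound(2.4f);    // 2
//     qRound64(3.5e9); // 3500000000, beyond int's range, hence qRound64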

namespace QtPrivate {
template <typename T>
constexpr inline const T &min(const T &a, const T &b) { return (a < b) ? a : b; }
}

[[nodiscard]] constexpr bool qFuzzyCompare(double p1, double p2) noexcept
{
    return (qAbs(p1 - p2) * 1000000000000. <= QtPrivate::min(qAbs(p1), qAbs(p2)));
}

[[nodiscard]] constexpr bool qFuzzyCompare(float p1, float p2) noexcept
{
    return (qAbs(p1 - p2) * 100000.f <= QtPrivate::min(qAbs(p1), qAbs(p2)));
}

[[nodiscard]] constexpr bool qFuzzyIsNull(double d) noexcept
{
    return qAbs(d) <= 0.000000000001;
}

[[nodiscard]] constexpr bool qFuzzyIsNull(float f) noexcept
{
    return qAbs(f) <= 0.00001f;
}
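
// The factors make the comparison relative: doubles compare equal when they
// agree to roughly 1 part in 10^12, floats to 1 part in 10^5. Since the scale
// is relative, values near zero should be tested with qFuzzyIsNull() instead
// (illustrative values):
//
//     qFuzzyCompare(1.0, 1.0 + 1e-13);   // true
//     qFuzzyCompare(0.0, 1e-13);         // false: min(|p1|, |p2|) is 0.0
//     qFuzzyIsNull(1e-13);               // true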

QT_WARNING_PUSH
QT_WARNING_DISABLE_FLOAT_COMPARE

[[nodiscard]] constexpr bool qIsNull(double d) noexcept
{
    return d == 0.0;
}

[[nodiscard]] constexpr bool qIsNull(float f) noexcept
{
    return f == 0.0f;
}

QT_WARNING_POP

inline int qIntCast(double f) { return int(f); }
inline int qIntCast(float f) { return int(f); }

QT_END_NAMESPACE

#endif // QNUMERIC_H