// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef GOOGLE_PROTOBUF_STUBS_PORT_H_
#define GOOGLE_PROTOBUF_STUBS_PORT_H_

#include <assert.h>
#include <cstdint>
#include <stdlib.h>
#include <cstddef>
#include <string>
#include <string.h>

#include <google/protobuf/stubs/platform_macros.h>

#include <google/protobuf/port_def.inc>

#undef PROTOBUF_LITTLE_ENDIAN
#ifdef _WIN32
// Assuming Windows is always little-endian.
// TODO(xiaofeng): The PROTOBUF_LITTLE_ENDIAN macro is not only used for
// optimization but also for correctness. We should define a different
// macro to test the big-endian code path in coded_stream.
#if !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
#define PROTOBUF_LITTLE_ENDIAN 1
#endif
#if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
// If MSVC has "/RTCc" set, it will complain about truncating casts at
// runtime.  This file contains some intentional truncating casts.
#pragma runtime_checks("c", off)
#endif
#else
#include <sys/param.h>  // __BYTE_ORDER
#if defined(__OpenBSD__)
#include <endian.h>
#endif
#if ((defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) ||   \
     (defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN) || \
     (defined(BYTE_ORDER) && BYTE_ORDER == LITTLE_ENDIAN)) &&      \
    !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
#define PROTOBUF_LITTLE_ENDIAN 1
#endif
#endif

// These #includes are for the byte swap functions declared later on.
#ifdef _MSC_VER
#include <stdlib.h>  // NOLINT(build/include)
#include <intrin.h>
#elif defined(__APPLE__)
#include <libkern/OSByteOrder.h>
#elif defined(__GLIBC__) || defined(__BIONIC__) || defined(__CYGWIN__)
#include <byteswap.h>  // IWYU pragma: export
#endif

// Legacy: some users reference these (internal-only) macros even though we
// don't need them any more.
#if defined(_MSC_VER) && defined(PROTOBUF_USE_DLLS)
#ifdef LIBPROTOBUF_EXPORTS
#define LIBPROTOBUF_EXPORT __declspec(dllexport)
#else
#define LIBPROTOBUF_EXPORT __declspec(dllimport)
#endif
#ifdef LIBPROTOC_EXPORTS
#define LIBPROTOC_EXPORT __declspec(dllexport)
#else
#define LIBPROTOC_EXPORT __declspec(dllimport)
#endif
#else
#define LIBPROTOBUF_EXPORT
#define LIBPROTOC_EXPORT
#endif

#define PROTOBUF_RUNTIME_DEPRECATED(message) PROTOBUF_DEPRECATED_MSG(message)
#define GOOGLE_PROTOBUF_RUNTIME_DEPRECATED(message) \
  PROTOBUF_DEPRECATED_MSG(message)

// ===================================================================
// from google3/base/port.h

#if (defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L || \
     (defined(_MSC_VER) && _MSC_VER >= 1900))
// Define this to 1 if the code is compiled in C++11 mode; leave it
// undefined otherwise.  Do NOT define it to 0 -- that causes
// '#ifdef LANG_CXX11' to behave differently from '#if LANG_CXX11'.
#define LANG_CXX11 1
#else
#error "Protobuf requires at least C++11."
#endif

namespace google {
namespace protobuf {

typedef unsigned int uint;

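// Fixed-width integer aliases kept for source compatibility with older code;
// they are identical to the corresponding <cstdint> types.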
typedef int8_t int8;
typedef int16_t int16;
typedef int32_t int32;
typedef int64_t int64;

typedef uint8_t uint8;
typedef uint16_t uint16;
typedef uint32_t uint32;
typedef uint64_t uint64;

static const int32 kint32max = 0x7FFFFFFF;
static const int32 kint32min = -kint32max - 1;
static const int64 kint64max = PROTOBUF_LONGLONG(0x7FFFFFFFFFFFFFFF);
static const int64 kint64min = -kint64max - 1;
static const uint32 kuint32max = 0xFFFFFFFFu;
static const uint64 kuint64max = PROTOBUF_ULONGLONG(0xFFFFFFFFFFFFFFFF);

#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || \
    defined(MEMORY_SANITIZER)

#ifdef __cplusplus
extern "C" {
#endif  // __cplusplus
uint16_t __sanitizer_unaligned_load16(const void *p);
uint32_t __sanitizer_unaligned_load32(const void *p);
uint64_t __sanitizer_unaligned_load64(const void *p);
void __sanitizer_unaligned_store16(void *p, uint16_t v);
void __sanitizer_unaligned_store32(void *p, uint32_t v);
void __sanitizer_unaligned_store64(void *p, uint64_t v);
#ifdef __cplusplus
}  // extern "C"
#endif  // __cplusplus

inline uint16 GOOGLE_UNALIGNED_LOAD16(const void *p) {
  return __sanitizer_unaligned_load16(p);
}

inline uint32 GOOGLE_UNALIGNED_LOAD32(const void *p) {
  return __sanitizer_unaligned_load32(p);
}

inline uint64 GOOGLE_UNALIGNED_LOAD64(const void *p) {
  return __sanitizer_unaligned_load64(p);
}

inline void GOOGLE_UNALIGNED_STORE16(void *p, uint16 v) {
  __sanitizer_unaligned_store16(p, v);
}

inline void GOOGLE_UNALIGNED_STORE32(void *p, uint32 v) {
  __sanitizer_unaligned_store32(p, v);
}

inline void GOOGLE_UNALIGNED_STORE64(void *p, uint64 v) {
  __sanitizer_unaligned_store64(p, v);
}

#elif defined(GOOGLE_PROTOBUF_USE_UNALIGNED) && GOOGLE_PROTOBUF_USE_UNALIGNED

#define GOOGLE_UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
#define GOOGLE_UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
#define GOOGLE_UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))

#define GOOGLE_UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
#define GOOGLE_UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
#define GOOGLE_UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))

#else
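// Fallback: dereferencing a misaligned pointer is undefined behavior on some
// targets, so copy through memcpy instead. Compilers typically lower these
// fixed-size copies to a single load or store on platforms that permit
// unaligned access.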
inline uint16 GOOGLE_UNALIGNED_LOAD16(const void *p) {
  uint16 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint32 GOOGLE_UNALIGNED_LOAD32(const void *p) {
  uint32 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint64 GOOGLE_UNALIGNED_LOAD64(const void *p) {
  uint64 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline void GOOGLE_UNALIGNED_STORE16(void *p, uint16 v) {
  memcpy(p, &v, sizeof v);
}

inline void GOOGLE_UNALIGNED_STORE32(void *p, uint32 v) {
  memcpy(p, &v, sizeof v);
}

inline void GOOGLE_UNALIGNED_STORE64(void *p, uint64 v) {
  memcpy(p, &v, sizeof v);
}
#endif
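// Whichever branch was selected above, GOOGLE_UNALIGNED_LOADnn(p) reads nn
// bits from an arbitrarily aligned address in host byte order, and
// GOOGLE_UNALIGNED_STOREnn(p, v) writes them back the same way.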

#if defined(GOOGLE_PROTOBUF_OS_NACL) \
    || (defined(__ANDROID__) && defined(__clang__) \
    && (__clang_major__ == 3 && __clang_minor__ == 8) \
    && (__clang_patchlevel__ < 275480))
# define GOOGLE_PROTOBUF_USE_PORTABLE_LOG2
#endif

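// Thread-local storage qualifier. MSVC spells this __declspec(thread) and
// GCC-compatible compilers spell it __thread; both predate the standard
// C++11 thread_local keyword.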
#if defined(_MSC_VER)
#define GOOGLE_THREAD_LOCAL __declspec(thread)
#else
#define GOOGLE_THREAD_LOCAL __thread
#endif

// The following guarantees declaration of the byte swap functions.
#ifdef _MSC_VER
#define bswap_16(x) _byteswap_ushort(x)
#define bswap_32(x) _byteswap_ulong(x)
#define bswap_64(x) _byteswap_uint64(x)

#elif defined(__APPLE__)
// Mac OS X / Darwin features
#define bswap_16(x) OSSwapInt16(x)
#define bswap_32(x) OSSwapInt32(x)
#define bswap_64(x) OSSwapInt64(x)

#elif !defined(__GLIBC__) && !defined(__BIONIC__) && !defined(__CYGWIN__)

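// No platform byte swap header is available on this branch, so provide
// portable fallbacks. Each definition is guarded by #ifndef so that a
// platform-supplied macro of the same name, if any, takes precedence.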
#ifndef bswap_16
static inline uint16 bswap_16(uint16 x) {
  return static_cast<uint16>(((x & 0xFF) << 8) | ((x & 0xFF00) >> 8));
}
#define bswap_16(x) bswap_16(x)
#endif

#ifndef bswap_32
static inline uint32 bswap_32(uint32 x) {
  return (((x & 0xFF) << 24) |
          ((x & 0xFF00) << 8) |
          ((x & 0xFF0000) >> 8) |
          ((x & 0xFF000000) >> 24));
}
#define bswap_32(x) bswap_32(x)
#endif

#ifndef bswap_64
static inline uint64 bswap_64(uint64 x) {
  return (((x & PROTOBUF_ULONGLONG(0xFF)) << 56) |
          ((x & PROTOBUF_ULONGLONG(0xFF00)) << 40) |
          ((x & PROTOBUF_ULONGLONG(0xFF0000)) << 24) |
          ((x & PROTOBUF_ULONGLONG(0xFF000000)) << 8) |
          ((x & PROTOBUF_ULONGLONG(0xFF00000000)) >> 8) |
          ((x & PROTOBUF_ULONGLONG(0xFF0000000000)) >> 24) |
          ((x & PROTOBUF_ULONGLONG(0xFF000000000000)) >> 40) |
          ((x & PROTOBUF_ULONGLONG(0xFF00000000000000)) >> 56));
}
#define bswap_64(x) bswap_64(x)
#endif

#endif

// ===================================================================
// from google3/util/bits/bits.h

class Bits {
 public:
  static uint32 Log2FloorNonZero(uint32 n) {
#if defined(__GNUC__)
    return 31 ^ static_cast<uint32>(__builtin_clz(n));
#elif defined(_MSC_VER)
    unsigned long where;
    _BitScanReverse(&where, n);
    return where;
#else
    return Log2FloorNonZero_Portable(n);
#endif
  }

  static uint32 Log2FloorNonZero64(uint64 n) {
    // Older versions of clang run into an instruction-selection failure when
    // they encounter __builtin_clzll:
    //   https://bugs.chromium.org/p/nativeclient/issues/detail?id=4395
    // This includes arm-nacl-clang and clang in older Android NDK versions.
    // To work around this, when we build with those we use the portable
    // implementation instead.
#if defined(__GNUC__) && !defined(GOOGLE_PROTOBUF_USE_PORTABLE_LOG2)
    return 63 ^ static_cast<uint32>(__builtin_clzll(n));
#elif defined(_MSC_VER) && defined(_M_X64)
    unsigned long where;
    _BitScanReverse64(&where, n);
    return where;
#else
    return Log2FloorNonZero64_Portable(n);
#endif
  }

 private:
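  // Portable fallback: a binary search over shifts of 16, 8, 4, 2 and 1 bits.
  // Returns the index of the highest set bit, or -1 if n is 0.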
  static int Log2FloorNonZero_Portable(uint32 n) {
    if (n == 0)
      return -1;
    int log = 0;
    uint32 value = n;
    for (int i = 4; i >= 0; --i) {
      int shift = (1 << i);
      uint32 x = value >> shift;
      if (x != 0) {
        value = x;
        log += shift;
      }
    }
    assert(value == 1);
    return log;
  }

  static int Log2FloorNonZero64_Portable(uint64 n) {
    const uint32 topbits = static_cast<uint32>(n >> 32);
    if (topbits == 0) {
      // Top bits are zero, so scan in bottom bits
      return static_cast<int>(Log2FloorNonZero(static_cast<uint32>(n)));
    } else {
      return 32 + static_cast<int>(Log2FloorNonZero(topbits));
    }
  }
};

// ===================================================================
// from google3/util/endian/endian.h
PROTOBUF_EXPORT uint32 ghtonl(uint32 x);

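// Conversion helpers between host byte order and big-endian byte order,
// together with unaligned big-endian loads and stores built on the
// GOOGLE_UNALIGNED_* primitives above.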
class BigEndian {
 public:
#ifdef PROTOBUF_LITTLE_ENDIAN

  static uint16 FromHost16(uint16 x) { return bswap_16(x); }
  static uint16 ToHost16(uint16 x) { return bswap_16(x); }

  static uint32 FromHost32(uint32 x) { return bswap_32(x); }
  static uint32 ToHost32(uint32 x) { return bswap_32(x); }

  static uint64 FromHost64(uint64 x) { return bswap_64(x); }
  static uint64 ToHost64(uint64 x) { return bswap_64(x); }

  static bool IsLittleEndian() { return true; }

#else

  static uint16 FromHost16(uint16 x) { return x; }
  static uint16 ToHost16(uint16 x) { return x; }

  static uint32 FromHost32(uint32 x) { return x; }
  static uint32 ToHost32(uint32 x) { return x; }

  static uint64 FromHost64(uint64 x) { return x; }
  static uint64 ToHost64(uint64 x) { return x; }

  static bool IsLittleEndian() { return false; }

#endif /* ENDIAN */

  // Functions to do unaligned loads and stores in big-endian order.
  static uint16 Load16(const void *p) {
    return ToHost16(GOOGLE_UNALIGNED_LOAD16(p));
  }

  static void Store16(void *p, uint16 v) {
    GOOGLE_UNALIGNED_STORE16(p, FromHost16(v));
  }

  static uint32 Load32(const void *p) {
    return ToHost32(GOOGLE_UNALIGNED_LOAD32(p));
  }

  static void Store32(void *p, uint32 v) {
    GOOGLE_UNALIGNED_STORE32(p, FromHost32(v));
  }

  static uint64 Load64(const void *p) {
    return ToHost64(GOOGLE_UNALIGNED_LOAD64(p));
  }

  static void Store64(void *p, uint64 v) {
    GOOGLE_UNALIGNED_STORE64(p, FromHost64(v));
  }
};
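
// Example (illustrative only): round-tripping a 32-bit value through a byte
// buffer in big-endian (network) order, regardless of the host's endianness:
//
//   char buf[4];
//   BigEndian::Store32(buf, 0x12345678u);
//   assert(BigEndian::Load32(buf) == 0x12345678u);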

}  // namespace protobuf
}  // namespace google

#include <google/protobuf/port_undef.inc>

#endif  // GOOGLE_PROTOBUF_STUBS_PORT_H_