| 1 | /* elf.c -- Get debug data from an ELF file for backtraces. |
| 2 | Copyright (C) 2012-2025 Free Software Foundation, Inc. |
| 3 | Written by Ian Lance Taylor, Google. |
| 4 | |
| 5 | Redistribution and use in source and binary forms, with or without |
| 6 | modification, are permitted provided that the following conditions are |
| 7 | met: |
| 8 | |
| 9 | (1) Redistributions of source code must retain the above copyright |
| 10 | notice, this list of conditions and the following disclaimer. |
| 11 | |
| 12 | (2) Redistributions in binary form must reproduce the above copyright |
| 13 | notice, this list of conditions and the following disclaimer in |
| 14 | the documentation and/or other materials provided with the |
| 15 | distribution. |
| 16 | |
| 17 | (3) The name of the author may not be used to |
| 18 | endorse or promote products derived from this software without |
| 19 | specific prior written permission. |
| 20 | |
| 21 | THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| 22 | IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
| 23 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
| 24 | DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, |
| 25 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
| 26 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
| 27 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 28 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
| 29 | STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING |
| 30 | IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| 31 | POSSIBILITY OF SUCH DAMAGE. */ |
| 32 | |
| 33 | #include "config.h" |
| 34 | |
| 35 | #include <errno.h> |
| 36 | #include <stdlib.h> |
| 37 | #include <string.h> |
| 38 | #include <sys/types.h> |
| 39 | #include <sys/stat.h> |
| 40 | #include <unistd.h> |
| 41 | |
| 42 | #ifdef HAVE_DL_ITERATE_PHDR |
| 43 | #ifdef HAVE_LINK_H |
| 44 | #include <link.h> |
| 45 | #endif |
| 46 | #ifdef HAVE_SYS_LINK_H |
| 47 | #include <sys/link.h> |
| 48 | #endif |
| 49 | #endif |
| 50 | |
| 51 | #include "backtrace.h" |
| 52 | #include "internal.h" |
| 53 | |
| 54 | #ifndef S_ISLNK |
| 55 | #ifndef S_IFLNK |
| 56 | #define S_IFLNK 0120000 |
| 57 | #endif |
| 58 | #ifndef S_IFMT |
| 59 | #define S_IFMT 0170000 |
| 60 | #endif |
| 61 | #define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK) |
| 62 | #endif |
| 63 | |
| 64 | #ifndef __GNUC__ |
| 65 | #define __builtin_prefetch(p, r, l) |
| 66 | #define unlikely(x) (x) |
| 67 | #else |
| 68 | #define unlikely(x) __builtin_expect(!!(x), 0) |
| 69 | #endif |
| 70 | |
| 71 | #if !defined(HAVE_DECL_STRNLEN) || !HAVE_DECL_STRNLEN |
| 72 | |
| 73 | /* If strnlen is not declared, provide our own version. */ |
| 74 | |
| 75 | static size_t |
| 76 | xstrnlen (const char *s, size_t maxlen) |
| 77 | { |
| 78 | size_t i; |
| 79 | |
| 80 | for (i = 0; i < maxlen; ++i) |
| 81 | if (s[i] == '\0') |
| 82 | break; |
| 83 | return i; |
| 84 | } |
| 85 | |
| 86 | #define strnlen xstrnlen |
| 87 | |
| 88 | #endif |
| 89 | |
| 90 | #ifndef HAVE_LSTAT |
| 91 | |
| 92 | /* Dummy version of lstat for systems that don't have it. */ |
| 93 | |
| 94 | static int |
| 95 | xlstat (const char *path ATTRIBUTE_UNUSED, struct stat *st ATTRIBUTE_UNUSED) |
| 96 | { |
| 97 | return -1; |
| 98 | } |
| 99 | |
| 100 | #define lstat xlstat |
| 101 | |
| 102 | #endif |
| 103 | |
| 104 | #ifndef HAVE_READLINK |
| 105 | |
| 106 | /* Dummy version of readlink for systems that don't have it. */ |
| 107 | |
| 108 | static ssize_t |
| 109 | xreadlink (const char *path ATTRIBUTE_UNUSED, char *buf ATTRIBUTE_UNUSED, |
| 110 | size_t bufsz ATTRIBUTE_UNUSED) |
| 111 | { |
| 112 | return -1; |
| 113 | } |
| 114 | |
| 115 | #define readlink xreadlink |
| 116 | |
| 117 | #endif |
| 118 | |
| 119 | #ifndef HAVE_DL_ITERATE_PHDR |
| 120 | |
| 121 | /* Dummy version of dl_iterate_phdr for systems that don't have it. */ |
| 122 | |
| 123 | #define dl_phdr_info x_dl_phdr_info |
| 124 | #define dl_iterate_phdr x_dl_iterate_phdr |
| 125 | |
| 126 | struct dl_phdr_info |
| 127 | { |
| 128 | uintptr_t dlpi_addr; |
| 129 | const char *dlpi_name; |
| 130 | }; |
| 131 | |
| 132 | static int |
| 133 | dl_iterate_phdr (int (*callback) (struct dl_phdr_info *, |
| 134 | size_t, void *) ATTRIBUTE_UNUSED, |
| 135 | void *data ATTRIBUTE_UNUSED) |
| 136 | { |
| 137 | return 0; |
| 138 | } |
| 139 | |
| 140 | #endif /* ! defined (HAVE_DL_ITERATE_PHDR) */ |
| 141 | |
| 142 | /* The configure script must tell us whether we are 32-bit or 64-bit |
| 143 | ELF. We could make this code test and support either possibility, |
| 144 | but there is no point. This code only works for the currently |
| 145 | running executable, which means that we know the ELF mode at |
| 146 | configure time. */ |
| 147 | |
| 148 | #if BACKTRACE_ELF_SIZE != 32 && BACKTRACE_ELF_SIZE != 64 |
| 149 | #error "Unknown BACKTRACE_ELF_SIZE" |
| 150 | #endif |
| 151 | |
| 152 | /* <link.h> might #include <elf.h> which might define our constants |
| 153 | with slightly different values. Undefine them to be safe. */ |
| 154 | |
| 155 | #undef EI_NIDENT |
| 156 | #undef EI_MAG0 |
| 157 | #undef EI_MAG1 |
| 158 | #undef EI_MAG2 |
| 159 | #undef EI_MAG3 |
| 160 | #undef EI_CLASS |
| 161 | #undef EI_DATA |
| 162 | #undef EI_VERSION |
| 163 | #undef ELF_MAG0 |
| 164 | #undef ELF_MAG1 |
| 165 | #undef ELF_MAG2 |
| 166 | #undef ELF_MAG3 |
| 167 | #undef ELFCLASS32 |
| 168 | #undef ELFCLASS64 |
| 169 | #undef ELFDATA2LSB |
| 170 | #undef ELFDATA2MSB |
| 171 | #undef EV_CURRENT |
| 172 | #undef ET_DYN |
| 173 | #undef EM_PPC64 |
| 174 | #undef EF_PPC64_ABI |
| 175 | #undef SHN_LORESERVE |
| 176 | #undef SHN_XINDEX |
| 177 | #undef SHN_UNDEF |
| 178 | #undef SHT_PROGBITS |
| 179 | #undef SHT_SYMTAB |
| 180 | #undef SHT_STRTAB |
| 181 | #undef SHT_DYNSYM |
| 182 | #undef SHF_COMPRESSED |
| 183 | #undef STT_OBJECT |
| 184 | #undef STT_FUNC |
| 185 | #undef NT_GNU_BUILD_ID |
| 186 | #undef ELFCOMPRESS_ZLIB |
| 187 | #undef ELFCOMPRESS_ZSTD |
| 188 | |
| 189 | /* Basic types. */ |
| 190 | |
| 191 | typedef uint16_t b_elf_half; /* Elf_Half. */ |
| 192 | typedef uint32_t b_elf_word; /* Elf_Word. */ |
| 193 | typedef int32_t b_elf_sword; /* Elf_Sword. */ |
| 194 | |
| 195 | #if BACKTRACE_ELF_SIZE == 32 |
| 196 | |
| 197 | typedef uint32_t b_elf_addr; /* Elf_Addr. */ |
| 198 | typedef uint32_t b_elf_off; /* Elf_Off. */ |
| 199 | |
| 200 | typedef uint32_t b_elf_wxword; /* 32-bit Elf_Word, 64-bit ELF_Xword. */ |
| 201 | |
| 202 | #else |
| 203 | |
| 204 | typedef uint64_t b_elf_addr; /* Elf_Addr. */ |
| 205 | typedef uint64_t b_elf_off; /* Elf_Off. */ |
| 206 | typedef uint64_t b_elf_xword; /* Elf_Xword. */ |
| 207 | typedef int64_t b_elf_sxword; /* Elf_Sxword. */ |
| 208 | |
| 209 | typedef uint64_t b_elf_wxword; /* 32-bit Elf_Word, 64-bit ELF_Xword. */ |
| 210 | |
| 211 | #endif |
| 212 | |
| 213 | /* Data structures and associated constants. */ |
| 214 | |
| 215 | #define EI_NIDENT 16 |
| 216 | |
| 217 | typedef struct { |
| 218 | unsigned char e_ident[EI_NIDENT]; /* ELF "magic number" */ |
| 219 | b_elf_half e_type; /* Identifies object file type */ |
| 220 | b_elf_half e_machine; /* Specifies required architecture */ |
| 221 | b_elf_word e_version; /* Identifies object file version */ |
| 222 | b_elf_addr e_entry; /* Entry point virtual address */ |
| 223 | b_elf_off e_phoff; /* Program header table file offset */ |
| 224 | b_elf_off e_shoff; /* Section header table file offset */ |
| 225 | b_elf_word e_flags; /* Processor-specific flags */ |
| 226 | b_elf_half e_ehsize; /* ELF header size in bytes */ |
| 227 | b_elf_half e_phentsize; /* Program header table entry size */ |
| 228 | b_elf_half e_phnum; /* Program header table entry count */ |
| 229 | b_elf_half e_shentsize; /* Section header table entry size */ |
| 230 | b_elf_half e_shnum; /* Section header table entry count */ |
| 231 | b_elf_half e_shstrndx; /* Section header string table index */ |
| 232 | } b_elf_ehdr; /* Elf_Ehdr. */ |
| 233 | |
| 234 | #define EI_MAG0 0 |
| 235 | #define EI_MAG1 1 |
| 236 | #define EI_MAG2 2 |
| 237 | #define EI_MAG3 3 |
| 238 | #define EI_CLASS 4 |
| 239 | #define EI_DATA 5 |
| 240 | #define EI_VERSION 6 |
| 241 | |
| 242 | #define ELFMAG0 0x7f |
| 243 | #define ELFMAG1 'E' |
| 244 | #define ELFMAG2 'L' |
| 245 | #define ELFMAG3 'F' |
| 246 | |
| 247 | #define ELFCLASS32 1 |
| 248 | #define ELFCLASS64 2 |
| 249 | |
| 250 | #define ELFDATA2LSB 1 |
| 251 | #define ELFDATA2MSB 2 |
| 252 | |
| 253 | #define EV_CURRENT 1 |
| 254 | |
| 255 | #define ET_DYN 3 |
| 256 | |
| 257 | #define EM_PPC64 21 |
| 258 | #define EF_PPC64_ABI 3 |
| 259 | |
| 260 | typedef struct { |
| 261 | b_elf_word sh_name; /* Section name, index in string tbl */ |
| 262 | b_elf_word sh_type; /* Type of section */ |
| 263 | b_elf_wxword sh_flags; /* Miscellaneous section attributes */ |
| 264 | b_elf_addr sh_addr; /* Section virtual addr at execution */ |
| 265 | b_elf_off sh_offset; /* Section file offset */ |
| 266 | b_elf_wxword sh_size; /* Size of section in bytes */ |
| 267 | b_elf_word sh_link; /* Index of another section */ |
| 268 | b_elf_word sh_info; /* Additional section information */ |
| 269 | b_elf_wxword sh_addralign; /* Section alignment */ |
| 270 | b_elf_wxword sh_entsize; /* Entry size if section holds table */ |
| 271 | } b_elf_shdr; /* Elf_Shdr. */ |
| 272 | |
| 273 | #define SHN_UNDEF 0x0000 /* Undefined section */ |
| 274 | #define SHN_LORESERVE 0xFF00 /* Begin range of reserved indices */ |
| 275 | #define SHN_XINDEX 0xFFFF /* Section index is held elsewhere */ |
| 276 | |
| 277 | #define SHT_PROGBITS 1 |
| 278 | #define SHT_SYMTAB 2 |
| 279 | #define SHT_STRTAB 3 |
| 280 | #define SHT_DYNSYM 11 |
| 281 | |
| 282 | #define SHF_COMPRESSED 0x800 |
| 283 | |
| 284 | #if BACKTRACE_ELF_SIZE == 32 |
| 285 | |
| 286 | typedef struct |
| 287 | { |
| 288 | b_elf_word st_name; /* Symbol name, index in string tbl */ |
| 289 | b_elf_addr st_value; /* Symbol value */ |
| 290 | b_elf_word st_size; /* Symbol size */ |
| 291 | unsigned char st_info; /* Symbol binding and type */ |
| 292 | unsigned char st_other; /* Visibility and other data */ |
| 293 | b_elf_half st_shndx; /* Symbol section index */ |
| 294 | } b_elf_sym; /* Elf_Sym. */ |
| 295 | |
| 296 | #else /* BACKTRACE_ELF_SIZE != 32 */ |
| 297 | |
| 298 | typedef struct |
| 299 | { |
| 300 | b_elf_word st_name; /* Symbol name, index in string tbl */ |
| 301 | unsigned char st_info; /* Symbol binding and type */ |
| 302 | unsigned char st_other; /* Visibility and other data */ |
| 303 | b_elf_half st_shndx; /* Symbol section index */ |
| 304 | b_elf_addr st_value; /* Symbol value */ |
| 305 | b_elf_xword st_size; /* Symbol size */ |
| 306 | } b_elf_sym; /* Elf_Sym. */ |
| 307 | |
| 308 | #endif /* BACKTRACE_ELF_SIZE != 32 */ |
| 309 | |
| 310 | #define STT_OBJECT 1 |
| 311 | #define STT_FUNC 2 |
| 312 | |
| 313 | typedef struct |
| 314 | { |
| 315 | uint32_t namesz; |
| 316 | uint32_t descsz; |
| 317 | uint32_t type; |
| 318 | char name[1]; |
| 319 | } b_elf_note; |
| 320 | |
| 321 | #define NT_GNU_BUILD_ID 3 |
| 322 | |
| 323 | #if BACKTRACE_ELF_SIZE == 32 |
| 324 | |
| 325 | typedef struct |
| 326 | { |
| 327 | b_elf_word ch_type; /* Compression algorithm */ |
| 328 | b_elf_word ch_size; /* Uncompressed size */ |
| 329 | b_elf_word ch_addralign; /* Alignment for uncompressed data */ |
| 330 | } b_elf_chdr; /* Elf_Chdr */ |
| 331 | |
| 332 | #else /* BACKTRACE_ELF_SIZE != 32 */ |
| 333 | |
| 334 | typedef struct |
| 335 | { |
| 336 | b_elf_word ch_type; /* Compression algorithm */ |
| 337 | b_elf_word ch_reserved; /* Reserved */ |
| 338 | b_elf_xword ch_size; /* Uncompressed size */ |
| 339 | b_elf_xword ch_addralign; /* Alignment for uncompressed data */ |
| 340 | } b_elf_chdr; /* Elf_Chdr */ |
| 341 | |
| 342 | #endif /* BACKTRACE_ELF_SIZE != 32 */ |
| 343 | |
| 344 | #define ELFCOMPRESS_ZLIB 1 |
| 345 | #define ELFCOMPRESS_ZSTD 2 |
| 346 | |
| 347 | /* Names of sections, indexed by enum dwarf_section in internal.h. */ |
| 348 | |
| 349 | static const char * const dwarf_section_names[DEBUG_MAX] = |
| 350 | { |
| 351 | ".debug_info" , |
| 352 | ".debug_line" , |
| 353 | ".debug_abbrev" , |
| 354 | ".debug_ranges" , |
| 355 | ".debug_str" , |
| 356 | ".debug_addr" , |
| 357 | ".debug_str_offsets" , |
| 358 | ".debug_line_str" , |
| 359 | ".debug_rnglists" |
| 360 | }; |
| 361 | |
| 362 | /* Information we gather for the sections we care about. */ |
| 363 | |
| 364 | struct debug_section_info |
| 365 | { |
| 366 | /* Section file offset. */ |
| 367 | off_t offset; |
| 368 | /* Section size. */ |
| 369 | size_t size; |
| 370 | /* Section contents, after read from file. */ |
| 371 | const unsigned char *data; |
| 372 | /* Whether the SHF_COMPRESSED flag is set for the section. */ |
| 373 | int compressed; |
| 374 | }; |
| 375 | |
| 376 | /* Information we keep for an ELF symbol. */ |
| 377 | |
| 378 | struct elf_symbol |
| 379 | { |
| 380 | /* The name of the symbol. */ |
| 381 | const char *name; |
| 382 | /* The address of the symbol. */ |
| 383 | uintptr_t address; |
| 384 | /* The size of the symbol. */ |
| 385 | size_t size; |
| 386 | }; |
| 387 | |
| 388 | /* Information to pass to elf_syminfo. */ |
| 389 | |
| 390 | struct elf_syminfo_data |
| 391 | { |
| 392 | /* Symbols for the next module. */ |
| 393 | struct elf_syminfo_data *next; |
| 394 | /* The ELF symbols, sorted by address. */ |
| 395 | struct elf_symbol *symbols; |
| 396 | /* The number of symbols. */ |
| 397 | size_t count; |
| 398 | }; |
| 399 | |
| 400 | /* A view that works for either a file or memory. */ |
| 401 | |
| 402 | struct elf_view |
| 403 | { |
| 404 | struct backtrace_view view; |
| 405 | int release; /* If non-zero, must call backtrace_release_view. */ |
| 406 | }; |
| 407 | |
| 408 | /* Information about PowerPC64 ELFv1 .opd section. */ |
| 409 | |
| 410 | struct elf_ppc64_opd_data |
| 411 | { |
| 412 | /* Address of the .opd section. */ |
| 413 | b_elf_addr addr; |
| 414 | /* Section data. */ |
| 415 | const char *data; |
| 416 | /* Size of the .opd section. */ |
| 417 | size_t size; |
| 418 | /* Corresponding section view. */ |
| 419 | struct elf_view view; |
| 420 | }; |
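| | /* (On PowerPC64 ELFv1, function symbols point into .opd, which holds |
| | function descriptors; the first word of a descriptor is the actual |
| | code address, and that is what elf_initialize_syminfo reads below |
| | when a symbol value falls inside this section.) */ |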
| 421 | |
| 422 | /* Create a view of SIZE bytes from DESCRIPTOR/MEMORY at OFFSET. */ |
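| | /* If MEMORY is NULL the view is read from DESCRIPTOR via |
| | backtrace_get_view and must later be released; otherwise it simply |
| | points into the in-memory image and nothing needs to be released. */ |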
| 423 | |
| 424 | static int |
| 425 | elf_get_view (struct backtrace_state *state, int descriptor, |
| 426 | const unsigned char *memory, size_t memory_size, off_t offset, |
| 427 | uint64_t size, backtrace_error_callback error_callback, |
| 428 | void *data, struct elf_view *view) |
| 429 | { |
| 430 | if (memory == NULL) |
| 431 | { |
| 432 | view->release = 1; |
| 433 | return backtrace_get_view (state, descriptor, offset, size, |
| 434 | error_callback, data, &view->view); |
| 435 | } |
| 436 | else |
| 437 | { |
| 438 | if ((uint64_t) offset + size > (uint64_t) memory_size) |
| 439 | { |
| 440 | error_callback (data, "out of range for in-memory file" , 0); |
| 441 | return 0; |
| 442 | } |
| 443 | view->view.data = (const void *) (memory + offset); |
| 444 | view->view.base = NULL; |
| 445 | view->view.len = size; |
| 446 | view->release = 0; |
| 447 | return 1; |
| 448 | } |
| 449 | } |
| 450 | |
| 451 | /* Release a view read by elf_get_view. */ |
| 452 | |
| 453 | static void |
| 454 | elf_release_view (struct backtrace_state *state, struct elf_view *view, |
| 455 | backtrace_error_callback error_callback, void *data) |
| 456 | { |
| 457 | if (view->release) |
| 458 | backtrace_release_view (state, &view->view, error_callback, data); |
| 459 | } |
| 460 | |
| 461 | /* Compute the CRC-32 of BUF/LEN. This uses the CRC used for |
| 462 | .gnu_debuglink files. */ |
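| | /* This is the standard table-driven CRC-32 (reflected polynomial |
| | 0xedb88320, the same checksum used by zlib and gzip), processed one |
| | byte at a time with the usual pre- and post-inversion of CRC. */ |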
| 463 | |
| 464 | static uint32_t |
| 465 | elf_crc32 (uint32_t crc, const unsigned char *buf, size_t len) |
| 466 | { |
| 467 | static const uint32_t crc32_table[256] = |
| 468 | { |
| 469 | 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, |
| 470 | 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, |
| 471 | 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, |
| 472 | 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, |
| 473 | 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, |
| 474 | 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, |
| 475 | 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, |
| 476 | 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, |
| 477 | 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, |
| 478 | 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, |
| 479 | 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, |
| 480 | 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, |
| 481 | 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, |
| 482 | 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, |
| 483 | 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, |
| 484 | 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, |
| 485 | 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, |
| 486 | 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, |
| 487 | 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, |
| 488 | 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, |
| 489 | 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, |
| 490 | 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, |
| 491 | 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010, |
| 492 | 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, |
| 493 | 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, |
| 494 | 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, |
| 495 | 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, |
| 496 | 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, |
| 497 | 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344, |
| 498 | 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, |
| 499 | 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, |
| 500 | 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, |
| 501 | 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, |
| 502 | 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c, |
| 503 | 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, |
| 504 | 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, |
| 505 | 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, |
| 506 | 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, |
| 507 | 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c, |
| 508 | 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, |
| 509 | 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, |
| 510 | 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, |
| 511 | 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, |
| 512 | 0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, |
| 513 | 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278, |
| 514 | 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, |
| 515 | 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66, |
| 516 | 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, |
| 517 | 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, |
| 518 | 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, |
| 519 | 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, |
| 520 | 0x2d02ef8d |
| 521 | }; |
| 522 | const unsigned char *end; |
| 523 | |
| 524 | crc = ~crc; |
| 525 | for (end = buf + len; buf < end; ++ buf) |
| 526 | crc = crc32_table[(crc ^ *buf) & 0xff] ^ (crc >> 8); |
| 527 | return ~crc; |
| 528 | } |
| 529 | |
| 530 | /* Return the CRC-32 of the entire file open at DESCRIPTOR. */ |
| 531 | |
| 532 | static uint32_t |
| 533 | elf_crc32_file (struct backtrace_state *state, int descriptor, |
| 534 | backtrace_error_callback error_callback, void *data) |
| 535 | { |
| 536 | struct stat st; |
| 537 | struct backtrace_view file_view; |
| 538 | uint32_t ret; |
| 539 | |
| 540 | if (fstat (descriptor, &st) < 0) |
| 541 | { |
| 542 | error_callback (data, "fstat", errno); |
| 543 | return 0; |
| 544 | } |
| 545 | |
| 546 | if (!backtrace_get_view (state, descriptor, 0, st.st_size, error_callback, |
| 547 | data, &file_view)) |
| 548 | return 0; |
| 549 | |
| 550 | ret = elf_crc32 (0, (const unsigned char *) file_view.data, st.st_size); |
| 551 | |
| 552 | backtrace_release_view (state, &file_view, error_callback, data); |
| 553 | |
| 554 | return ret; |
| 555 | } |
| 556 | |
| 557 | /* A dummy callback function used when we can't find a symbol |
| 558 | table. */ |
| 559 | |
| 560 | static void |
| 561 | elf_nosyms (struct backtrace_state *state ATTRIBUTE_UNUSED, |
| 562 | uintptr_t addr ATTRIBUTE_UNUSED, |
| 563 | backtrace_syminfo_callback callback ATTRIBUTE_UNUSED, |
| 564 | backtrace_error_callback error_callback, void *data) |
| 565 | { |
| 566 | error_callback (data, "no symbol table in ELF executable" , -1); |
| 567 | } |
| 568 | |
| 569 | /* A callback function used when we can't find any debug info. */ |
| 570 | |
| 571 | static int |
| 572 | elf_nodebug (struct backtrace_state *state, uintptr_t pc, |
| 573 | backtrace_full_callback callback, |
| 574 | backtrace_error_callback error_callback, void *data) |
| 575 | { |
| 576 | if (state->syminfo_fn != NULL && state->syminfo_fn != elf_nosyms) |
| 577 | { |
| 578 | struct backtrace_call_full bdata; |
| 579 | |
| 580 | /* Fetch symbol information so that we can at least get the |
| 581 | function name. */ |
| 582 | |
| 583 | bdata.full_callback = callback; |
| 584 | bdata.full_error_callback = error_callback; |
| 585 | bdata.full_data = data; |
| 586 | bdata.ret = 0; |
| 587 | state->syminfo_fn (state, pc, backtrace_syminfo_to_full_callback, |
| 588 | backtrace_syminfo_to_full_error_callback, &bdata); |
| 589 | return bdata.ret; |
| 590 | } |
| 591 | |
| 592 | error_callback (data, "no debug info in ELF executable (make sure to compile with -g)" , -1); |
| 593 | return 0; |
| 594 | } |
| 595 | |
| 596 | /* Compare struct elf_symbol for qsort. */ |
| 597 | |
| 598 | static int |
| 599 | elf_symbol_compare (const void *v1, const void *v2) |
| 600 | { |
| 601 | const struct elf_symbol *e1 = (const struct elf_symbol *) v1; |
| 602 | const struct elf_symbol *e2 = (const struct elf_symbol *) v2; |
| 603 | |
| 604 | if (e1->address < e2->address) |
| 605 | return -1; |
| 606 | else if (e1->address > e2->address) |
| 607 | return 1; |
| 608 | else |
| 609 | return 0; |
| 610 | } |
| 611 | |
| 612 | /* Compare an ADDR against an elf_symbol for bsearch. We allocate one |
| 613 | extra entry in the array so that this can look safely at the next |
| 614 | entry. */ |
| 615 | |
| 616 | static int |
| 617 | elf_symbol_search (const void *vkey, const void *ventry) |
| 618 | { |
| 619 | const uintptr_t *key = (const uintptr_t *) vkey; |
| 620 | const struct elf_symbol *entry = (const struct elf_symbol *) ventry; |
| 621 | uintptr_t addr; |
| 622 | |
| 623 | addr = *key; |
| 624 | if (addr < entry->address) |
| 625 | return -1; |
| 626 | else if (addr >= entry->address + entry->size) |
| 627 | return 1; |
| 628 | else |
| 629 | return 0; |
| 630 | } |
| 631 | |
| 632 | /* Initialize the symbol table info for elf_syminfo. */ |
| 633 | |
| 634 | static int |
| 635 | elf_initialize_syminfo (struct backtrace_state *state, |
| 636 | struct libbacktrace_base_address base_address, |
| 637 | const unsigned char *symtab_data, size_t symtab_size, |
| 638 | const unsigned char *strtab, size_t strtab_size, |
| 639 | backtrace_error_callback error_callback, |
| 640 | void *data, struct elf_syminfo_data *sdata, |
| 641 | struct elf_ppc64_opd_data *opd) |
| 642 | { |
| 643 | size_t sym_count; |
| 644 | const b_elf_sym *sym; |
| 645 | size_t elf_symbol_count; |
| 646 | size_t elf_symbol_size; |
| 647 | struct elf_symbol *elf_symbols; |
| 648 | size_t i; |
| 649 | unsigned int j; |
| 650 | |
| 651 | sym_count = symtab_size / sizeof (b_elf_sym); |
| 652 | |
| 653 | /* We only care about function symbols. Count them. */ |
| 654 | sym = (const b_elf_sym *) symtab_data; |
| 655 | elf_symbol_count = 0; |
| 656 | for (i = 0; i < sym_count; ++i, ++sym) |
| 657 | { |
| 658 | int info; |
| 659 | |
| 660 | info = sym->st_info & 0xf; |
| 661 | if ((info == STT_FUNC || info == STT_OBJECT) |
| 662 | && sym->st_shndx != SHN_UNDEF) |
| 663 | ++elf_symbol_count; |
| 664 | } |
| 665 | |
| 666 | elf_symbol_size = elf_symbol_count * sizeof (struct elf_symbol); |
| 667 | elf_symbols = ((struct elf_symbol *) |
| 668 | backtrace_alloc (state, elf_symbol_size, error_callback, |
| 669 | data)); |
| 670 | if (elf_symbols == NULL) |
| 671 | return 0; |
| 672 | |
| 673 | sym = (const b_elf_sym *) symtab_data; |
| 674 | j = 0; |
| 675 | for (i = 0; i < sym_count; ++i, ++sym) |
| 676 | { |
| 677 | int info; |
| 678 | |
| 679 | info = sym->st_info & 0xf; |
| 680 | if (info != STT_FUNC && info != STT_OBJECT) |
| 681 | continue; |
| 682 | if (sym->st_shndx == SHN_UNDEF) |
| 683 | continue; |
| 684 | if (sym->st_name >= strtab_size) |
| 685 | { |
| 686 | error_callback (data, "symbol string index out of range" , 0); |
| 687 | backtrace_free (state, mem: elf_symbols, size: elf_symbol_size, error_callback, |
| 688 | data); |
| 689 | return 0; |
| 690 | } |
| 691 | elf_symbols[j].name = (const char *) strtab + sym->st_name; |
| 692 | /* Special case PowerPC64 ELFv1 symbols in .opd section, if the symbol |
| 693 | is a function descriptor, read the actual code address from the |
| 694 | descriptor. */ |
| 695 | if (opd |
| 696 | && sym->st_value >= opd->addr |
| 697 | && sym->st_value < opd->addr + opd->size) |
| 698 | elf_symbols[j].address |
| 699 | = *(const b_elf_addr *) (opd->data + (sym->st_value - opd->addr)); |
| 700 | else |
| 701 | elf_symbols[j].address = sym->st_value; |
| 702 | elf_symbols[j].address = |
| 703 | libbacktrace_add_base (elf_symbols[j].address, base_address); |
| 704 | elf_symbols[j].size = sym->st_size; |
| 705 | ++j; |
| 706 | } |
| 707 | |
| 708 | backtrace_qsort (elf_symbols, elf_symbol_count, sizeof (struct elf_symbol), |
| 709 | elf_symbol_compare); |
| 710 | |
| 711 | sdata->next = NULL; |
| 712 | sdata->symbols = elf_symbols; |
| 713 | sdata->count = elf_symbol_count; |
| 714 | |
| 715 | return 1; |
| 716 | } |
| 717 | |
| 718 | /* Add EDATA to the list in STATE. */ |
| 719 | |
| 720 | static void |
| 721 | elf_add_syminfo_data (struct backtrace_state *state, |
| 722 | struct elf_syminfo_data *edata) |
| 723 | { |
| 724 | if (!state->threaded) |
| 725 | { |
| 726 | struct elf_syminfo_data **pp; |
| 727 | |
| 728 | for (pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data; |
| 729 | *pp != NULL; |
| 730 | pp = &(*pp)->next) |
| 731 | ; |
| 732 | *pp = edata; |
| 733 | } |
| 734 | else |
| 735 | { |
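| | /* In threaded mode we cannot simply walk and rewrite the list, so |
| | append with an atomic compare-and-swap of the final NULL pointer, |
| | restarting from the head if another thread appended first. */ |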
| 736 | while (1) |
| 737 | { |
| 738 | struct elf_syminfo_data **pp; |
| 739 | |
| 740 | pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data; |
| 741 | |
| 742 | while (1) |
| 743 | { |
| 744 | struct elf_syminfo_data *p; |
| 745 | |
| 746 | p = backtrace_atomic_load_pointer (pp); |
| 747 | |
| 748 | if (p == NULL) |
| 749 | break; |
| 750 | |
| 751 | pp = &p->next; |
| 752 | } |
| 753 | |
| 754 | if (__sync_bool_compare_and_swap (pp, NULL, edata)) |
| 755 | break; |
| 756 | } |
| 757 | } |
| 758 | } |
| 759 | |
| 760 | /* Return the symbol name and value for an ADDR. */ |
| 761 | |
| 762 | static void |
| 763 | elf_syminfo (struct backtrace_state *state, uintptr_t addr, |
| 764 | backtrace_syminfo_callback callback, |
| 765 | backtrace_error_callback error_callback ATTRIBUTE_UNUSED, |
| 766 | void *data) |
| 767 | { |
| 768 | struct elf_syminfo_data *edata; |
| 769 | struct elf_symbol *sym = NULL; |
| 770 | |
| 771 | if (!state->threaded) |
| 772 | { |
| 773 | for (edata = (struct elf_syminfo_data *) state->syminfo_data; |
| 774 | edata != NULL; |
| 775 | edata = edata->next) |
| 776 | { |
| 777 | sym = ((struct elf_symbol *) |
| 778 | bsearch (&addr, edata->symbols, edata->count, |
| 779 | sizeof (struct elf_symbol), elf_symbol_search)); |
| 780 | if (sym != NULL) |
| 781 | break; |
| 782 | } |
| 783 | } |
| 784 | else |
| 785 | { |
| 786 | struct elf_syminfo_data **pp; |
| 787 | |
| 788 | pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data; |
| 789 | while (1) |
| 790 | { |
| 791 | edata = backtrace_atomic_load_pointer (pp); |
| 792 | if (edata == NULL) |
| 793 | break; |
| 794 | |
| 795 | sym = ((struct elf_symbol *) |
| 796 | bsearch (&addr, edata->symbols, edata->count, |
| 797 | sizeof (struct elf_symbol), elf_symbol_search)); |
| 798 | if (sym != NULL) |
| 799 | break; |
| 800 | |
| 801 | pp = &edata->next; |
| 802 | } |
| 803 | } |
| 804 | |
| 805 | if (sym == NULL) |
| 806 | callback (data, addr, NULL, 0, 0); |
| 807 | else |
| 808 | callback (data, addr, sym->name, sym->address, sym->size); |
| 809 | } |
| 810 | |
| 811 | /* Return whether FILENAME is a symlink. */ |
| 812 | |
| 813 | static int |
| 814 | elf_is_symlink (const char *filename) |
| 815 | { |
| 816 | struct stat st; |
| 817 | |
| 818 | if (lstat (filename, &st) < 0) |
| 819 | return 0; |
| 820 | return S_ISLNK (st.st_mode); |
| 821 | } |
| 822 | |
| 823 | /* Return the results of reading the symlink FILENAME in a buffer |
| 824 | allocated by backtrace_alloc. Return the length of the buffer in |
| 825 | *LEN. */ |
| 826 | |
| 827 | static char * |
| 828 | elf_readlink (struct backtrace_state *state, const char *filename, |
| 829 | backtrace_error_callback error_callback, void *data, |
| 830 | size_t *plen) |
| 831 | { |
| 832 | size_t len; |
| 833 | char *buf; |
| 834 | |
| 835 | len = 128; |
| 836 | while (1) |
| 837 | { |
| 838 | ssize_t rl; |
| 839 | |
| 840 | buf = backtrace_alloc (state, len, error_callback, data); |
| 841 | if (buf == NULL) |
| 842 | return NULL; |
| 843 | rl = readlink (filename, buf, len); |
| 844 | if (rl < 0) |
| 845 | { |
| 846 | backtrace_free (state, buf, len, error_callback, data); |
| 847 | return NULL; |
| 848 | } |
| 849 | if ((size_t) rl < len - 1) |
| 850 | { |
| 851 | buf[rl] = '\0'; |
| 852 | *plen = len; |
| 853 | return buf; |
| 854 | } |
| 855 | backtrace_free (state, buf, len, error_callback, data); |
| 856 | len *= 2; |
| 857 | } |
| 858 | } |
| 859 | |
| 860 | #define SYSTEM_BUILD_ID_DIR "/usr/lib/debug/.build-id/" |
| 861 | |
| 862 | /* Open a separate debug info file, using the build ID to find it. |
| 863 | Returns an open file descriptor, or -1. |
| 864 | |
| 865 | The GDB manual says that the only place gdb looks for a debug file |
| 866 | when the build ID is known is in /usr/lib/debug/.build-id. */ |
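| | /* For example, a build ID whose bytes begin 0xab 0xcd 0x12 ... is |
| | looked up as /usr/lib/debug/.build-id/ab/cd12....debug: the first |
| | byte names a subdirectory and the remaining hex digits, plus the |
| | ".debug" suffix, name the file. */ |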
| 867 | |
| 868 | static int |
| 869 | elf_open_debugfile_by_buildid (struct backtrace_state *state, |
| 870 | const char *buildid_data, size_t buildid_size, |
| 871 | backtrace_error_callback error_callback, |
| 872 | void *data) |
| 873 | { |
| 874 | const char * const prefix = SYSTEM_BUILD_ID_DIR; |
| 875 | const size_t prefix_len = strlen (prefix); |
| 876 | const char * const suffix = ".debug"; |
| 877 | const size_t suffix_len = strlen (suffix); |
| 878 | size_t len; |
| 879 | char *bd_filename; |
| 880 | char *t; |
| 881 | size_t i; |
| 882 | int ret; |
| 883 | int does_not_exist; |
| 884 | |
| 885 | len = prefix_len + buildid_size * 2 + suffix_len + 2; |
| 886 | bd_filename = backtrace_alloc (state, len, error_callback, data); |
| 887 | if (bd_filename == NULL) |
| 888 | return -1; |
| 889 | |
| 890 | t = bd_filename; |
| 891 | memcpy (t, prefix, prefix_len); |
| 892 | t += prefix_len; |
| 893 | for (i = 0; i < buildid_size; i++) |
| 894 | { |
| 895 | unsigned char b; |
| 896 | unsigned char nib; |
| 897 | |
| 898 | b = (unsigned char) buildid_data[i]; |
| 899 | nib = (b & 0xf0) >> 4; |
| 900 | *t++ = nib < 10 ? '0' + nib : 'a' + nib - 10; |
| 901 | nib = b & 0x0f; |
| 902 | *t++ = nib < 10 ? '0' + nib : 'a' + nib - 10; |
| 903 | if (i == 0) |
| 904 | *t++ = '/'; |
| 905 | } |
| 906 | memcpy (t, suffix, suffix_len); |
| 907 | t[suffix_len] = '\0'; |
| 908 | |
| 909 | ret = backtrace_open (bd_filename, error_callback, data, &does_not_exist); |
| 910 | |
| 911 | backtrace_free (state, bd_filename, len, error_callback, data); |
| 912 | |
| 913 | /* gdb checks that the debuginfo file has the same build ID note. |
| 914 | That seems kind of pointless to me--why would it have the right |
| 915 | name but not the right build ID?--so skipping the check. */ |
| 916 | |
| 917 | return ret; |
| 918 | } |
| 919 | |
| 920 | /* Try to open a file whose name is PREFIX (length PREFIX_LEN) |
| 921 | concatenated with PREFIX2 (length PREFIX2_LEN) concatenated with |
| 922 | DEBUGLINK_NAME. Returns an open file descriptor, or -1. */ |
| 923 | |
| 924 | static int |
| 925 | elf_try_debugfile (struct backtrace_state *state, const char *prefix, |
| 926 | size_t prefix_len, const char *prefix2, size_t prefix2_len, |
| 927 | const char *debuglink_name, |
| 928 | backtrace_error_callback error_callback, void *data) |
| 929 | { |
| 930 | size_t debuglink_len; |
| 931 | size_t try_len; |
| 932 | char *try; |
| 933 | int does_not_exist; |
| 934 | int ret; |
| 935 | |
| 936 | debuglink_len = strlen (debuglink_name); |
| 937 | try_len = prefix_len + prefix2_len + debuglink_len + 1; |
| 938 | try = backtrace_alloc (state, try_len, error_callback, data); |
| 939 | if (try == NULL) |
| 940 | return -1; |
| 941 | |
| 942 | memcpy (try, prefix, prefix_len); |
| 943 | memcpy (try + prefix_len, prefix2, prefix2_len); |
| 944 | memcpy (try + prefix_len + prefix2_len, debuglink_name, debuglink_len); |
| 945 | try[prefix_len + prefix2_len + debuglink_len] = '\0'; |
| 946 | |
| 947 | ret = backtrace_open (try, error_callback, data, &does_not_exist); |
| 948 | |
| 949 | backtrace_free (state, try, try_len, error_callback, data); |
| 950 | |
| 951 | return ret; |
| 952 | } |
| 953 | |
| 954 | /* Find a separate debug info file, using the debuglink section data |
| 955 | to find it. Returns an open file descriptor, or -1. */ |
| 956 | |
| 957 | static int |
| 958 | elf_find_debugfile_by_debuglink (struct backtrace_state *state, |
| 959 | const char *filename, |
| 960 | const char *debuglink_name, |
| 961 | backtrace_error_callback error_callback, |
| 962 | void *data) |
| 963 | { |
| 964 | int ret; |
| 965 | char *alc; |
| 966 | size_t alc_len; |
| 967 | const char *slash; |
| 968 | int ddescriptor; |
| 969 | const char *prefix; |
| 970 | size_t prefix_len; |
| 971 | |
| 972 | /* Resolve symlinks in FILENAME. Since FILENAME is fairly likely to |
| 973 | be /proc/self/exe, symlinks are common. We don't try to resolve |
| 974 | the whole path name, just the base name. */ |
| 975 | ret = -1; |
| 976 | alc = NULL; |
| 977 | alc_len = 0; |
| 978 | while (elf_is_symlink (filename)) |
| 979 | { |
| 980 | char *new_buf; |
| 981 | size_t new_len; |
| 982 | |
| 983 | new_buf = elf_readlink (state, filename, error_callback, data, &new_len); |
| 984 | if (new_buf == NULL) |
| 985 | break; |
| 986 | |
| 987 | if (new_buf[0] == '/') |
| 988 | filename = new_buf; |
| 989 | else |
| 990 | { |
| 991 | slash = strrchr (filename, '/'); |
| 992 | if (slash == NULL) |
| 993 | filename = new_buf; |
| 994 | else |
| 995 | { |
| 996 | size_t clen; |
| 997 | char *c; |
| 998 | |
| 999 | slash++; |
| 1000 | clen = slash - filename + strlen (new_buf) + 1; |
| 1001 | c = backtrace_alloc (state, clen, error_callback, data); |
| 1002 | if (c == NULL) |
| 1003 | goto done; |
| 1004 | |
| 1005 | memcpy (c, filename, slash - filename); |
| 1006 | memcpy (c + (slash - filename), new_buf, strlen (new_buf)); |
| 1007 | c[slash - filename + strlen (new_buf)] = '\0'; |
| 1008 | backtrace_free (state, new_buf, new_len, error_callback, data); |
| 1009 | filename = c; |
| 1010 | new_buf = c; |
| 1011 | new_len = clen; |
| 1012 | } |
| 1013 | } |
| 1014 | |
| 1015 | if (alc != NULL) |
| 1016 | backtrace_free (state, alc, alc_len, error_callback, data); |
| 1017 | alc = new_buf; |
| 1018 | alc_len = new_len; |
| 1019 | } |
| 1020 | |
| 1021 | /* Look for DEBUGLINK_NAME in the same directory as FILENAME. */ |
| 1022 | |
| 1023 | slash = strrchr (filename, '/'); |
| 1024 | if (slash == NULL) |
| 1025 | { |
| 1026 | prefix = "" ; |
| 1027 | prefix_len = 0; |
| 1028 | } |
| 1029 | else |
| 1030 | { |
| 1031 | slash++; |
| 1032 | prefix = filename; |
| 1033 | prefix_len = slash - filename; |
| 1034 | } |
| 1035 | |
| 1036 | ddescriptor = elf_try_debugfile (state, prefix, prefix_len, "", 0, |
| 1037 | debuglink_name, error_callback, data); |
| 1038 | if (ddescriptor >= 0) |
| 1039 | { |
| 1040 | ret = ddescriptor; |
| 1041 | goto done; |
| 1042 | } |
| 1043 | |
| 1044 | /* Look for DEBUGLINK_NAME in a .debug subdirectory of FILENAME. */ |
| 1045 | |
| 1046 | ddescriptor = elf_try_debugfile (state, prefix, prefix_len, ".debug/", |
| 1047 | strlen (".debug/"), debuglink_name, |
| 1048 | error_callback, data); |
| 1049 | if (ddescriptor >= 0) |
| 1050 | { |
| 1051 | ret = ddescriptor; |
| 1052 | goto done; |
| 1053 | } |
| 1054 | |
| 1055 | /* Look for DEBUGLINK_NAME in /usr/lib/debug. */ |
| 1056 | |
| 1057 | ddescriptor = elf_try_debugfile (state, "/usr/lib/debug/", |
| 1058 | strlen ("/usr/lib/debug/"), prefix, |
| 1059 | prefix_len, debuglink_name, |
| 1060 | error_callback, data); |
| 1061 | if (ddescriptor >= 0) |
| 1062 | ret = ddescriptor; |
| 1063 | |
| 1064 | done: |
| 1065 | if (alc != NULL && alc_len > 0) |
| 1066 | backtrace_free (state, alc, alc_len, error_callback, data); |
| 1067 | return ret; |
| 1068 | } |
| 1069 | |
| 1070 | /* Open a separate debug info file, using the debuglink section data |
| 1071 | to find it. Returns an open file descriptor, or -1. */ |
| 1072 | |
| 1073 | static int |
| 1074 | elf_open_debugfile_by_debuglink (struct backtrace_state *state, |
| 1075 | const char *filename, |
| 1076 | const char *debuglink_name, |
| 1077 | uint32_t debuglink_crc, |
| 1078 | backtrace_error_callback error_callback, |
| 1079 | void *data) |
| 1080 | { |
| 1081 | int ddescriptor; |
| 1082 | |
| 1083 | ddescriptor = elf_find_debugfile_by_debuglink (state, filename, |
| 1084 | debuglink_name, |
| 1085 | error_callback, data); |
| 1086 | if (ddescriptor < 0) |
| 1087 | return -1; |
| 1088 | |
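| | /* A DEBUGLINK_CRC of zero means the caller has no checksum to verify, |
| | in which case any file found by name is accepted as is. */ |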
| 1089 | if (debuglink_crc != 0) |
| 1090 | { |
| 1091 | uint32_t got_crc; |
| 1092 | |
| 1093 | got_crc = elf_crc32_file (state, ddescriptor, error_callback, data); |
| 1094 | if (got_crc != debuglink_crc) |
| 1095 | { |
| 1096 | backtrace_close (ddescriptor, error_callback, data); |
| 1097 | return -1; |
| 1098 | } |
| 1099 | } |
| 1100 | |
| 1101 | return ddescriptor; |
| 1102 | } |
| 1103 | |
| 1104 | /* A function useful for setting a breakpoint for an inflation failure |
| 1105 | when this code is compiled with -g. */ |
| 1106 | |
| 1107 | static void |
| 1108 | elf_uncompress_failed(void) |
| 1109 | { |
| 1110 | } |
| 1111 | |
| 1112 | /* *PVAL is the current value being read from the stream, and *PBITS |
| 1113 | is the number of valid bits. Ensure that *PVAL holds at least 15 |
| 1114 | bits by reading additional bits from *PPIN, up to PINEND, as |
| 1115 | needed. Updates *PPIN, *PVAL and *PBITS. Returns 1 on success, 0 |
| 1116 | on error. */ |
| 1117 | |
| 1118 | static int |
| 1119 | elf_fetch_bits (const unsigned char **ppin, const unsigned char *pinend, |
| 1120 | uint64_t *pval, unsigned int *pbits) |
| 1121 | { |
| 1122 | unsigned int bits; |
| 1123 | const unsigned char *pin; |
| 1124 | uint64_t val; |
| 1125 | uint32_t next; |
| 1126 | |
| 1127 | bits = *pbits; |
| 1128 | if (bits >= 15) |
| 1129 | return 1; |
| 1130 | pin = *ppin; |
| 1131 | val = *pval; |
| 1132 | |
| 1133 | if (unlikely (pinend - pin < 4)) |
| 1134 | { |
| 1135 | elf_uncompress_failed (); |
| 1136 | return 0; |
| 1137 | } |
| 1138 | |
| 1139 | #if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) \ |
| 1140 | && defined(__ORDER_BIG_ENDIAN__) \ |
| 1141 | && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ \ |
| 1142 | || __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) |
| 1143 | /* We've ensured that PIN is aligned. */ |
| 1144 | next = *(const uint32_t *)pin; |
| 1145 | |
| 1146 | #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ |
| 1147 | next = __builtin_bswap32 (next); |
| 1148 | #endif |
| 1149 | #else |
| 1150 | next = ((uint32_t)pin[0] |
| 1151 | | ((uint32_t)pin[1] << 8) |
| 1152 | | ((uint32_t)pin[2] << 16) |
| 1153 | | ((uint32_t)pin[3] << 24)); |
| 1154 | #endif |
| 1155 | |
| 1156 | val |= (uint64_t)next << bits; |
| 1157 | bits += 32; |
| 1158 | pin += 4; |
| 1159 | |
| 1160 | /* We will need the next four bytes soon. */ |
| 1161 | __builtin_prefetch (pin, 0, 0); |
| 1162 | |
| 1163 | *ppin = pin; |
| 1164 | *pval = val; |
| 1165 | *pbits = bits; |
| 1166 | return 1; |
| 1167 | } |
| 1168 | |
| 1169 | /* This is like elf_fetch_bits, but it fetches the bits backward, and ensures at |
| 1170 | least 16 bits. This is for zstd. */ |
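| | /* (zstd bit streams are written forward but consumed starting from the |
| | last byte, so the input pointer moves downward through the buffer |
| | rather than upward.) */ |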
| 1171 | |
| 1172 | static int |
| 1173 | elf_fetch_bits_backward (const unsigned char **ppin, |
| 1174 | const unsigned char *pinend, |
| 1175 | uint64_t *pval, unsigned int *pbits) |
| 1176 | { |
| 1177 | unsigned int bits; |
| 1178 | const unsigned char *pin; |
| 1179 | uint64_t val; |
| 1180 | uint32_t next; |
| 1181 | |
| 1182 | bits = *pbits; |
| 1183 | if (bits >= 16) |
| 1184 | return 1; |
| 1185 | pin = *ppin; |
| 1186 | val = *pval; |
| 1187 | |
| 1188 | if (unlikely (pin <= pinend)) |
| 1189 | return 1; |
| 1190 | |
| 1191 | pin -= 4; |
| 1192 | |
| 1193 | #if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) \ |
| 1194 | && defined(__ORDER_BIG_ENDIAN__) \ |
| 1195 | && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ \ |
| 1196 | || __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) |
| 1197 | /* We've ensured that PIN is aligned. */ |
| 1198 | next = *(const uint32_t *)pin; |
| 1199 | |
| 1200 | #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ |
| 1201 | next = __builtin_bswap32 (next); |
| 1202 | #endif |
| 1203 | #else |
| 1204 | next = ((uint32_t)pin[0] |
| 1205 | | ((uint32_t)pin[1] << 8) |
| 1206 | | ((uint32_t)pin[2] << 16) |
| 1207 | | ((uint32_t)pin[3] << 24)); |
| 1208 | #endif |
| 1209 | |
| 1210 | val <<= 32; |
| 1211 | val |= next; |
| 1212 | bits += 32; |
| 1213 | |
| 1214 | if (unlikely (pin < pinend)) |
| 1215 | { |
| 1216 | val >>= (pinend - pin) * 8; |
| 1217 | bits -= (pinend - pin) * 8; |
| 1218 | } |
| 1219 | |
| 1220 | *ppin = pin; |
| 1221 | *pval = val; |
| 1222 | *pbits = bits; |
| 1223 | return 1; |
| 1224 | } |
| 1225 | |
| 1226 | /* Initialize backward fetching when the bitstream starts with a 1 bit in the |
| 1227 | last byte in memory (which is the first one that we read). This is used by |
| 1228 | zstd decompression. Returns 1 on success, 0 on error. */ |
| 1229 | |
| 1230 | static int |
| 1231 | elf_fetch_backward_init (const unsigned char **ppin, |
| 1232 | const unsigned char *pinend, |
| 1233 | uint64_t *pval, unsigned int *pbits) |
| 1234 | { |
| 1235 | const unsigned char *pin; |
| 1236 | unsigned int stream_start; |
| 1237 | uint64_t val; |
| 1238 | unsigned int bits; |
| 1239 | |
| 1240 | pin = *ppin; |
| 1241 | stream_start = (unsigned int)*pin; |
| 1242 | if (unlikely (stream_start == 0)) |
| 1243 | { |
| 1244 | elf_uncompress_failed (); |
| 1245 | return 0; |
| 1246 | } |
| 1247 | val = 0; |
| 1248 | bits = 0; |
| 1249 | |
| 1250 | /* Align to a 32-bit boundary. */ |
| 1251 | while ((((uintptr_t)pin) & 3) != 0) |
| 1252 | { |
| 1253 | val <<= 8; |
| 1254 | val |= (uint64_t)*pin; |
| 1255 | bits += 8; |
| 1256 | --pin; |
| 1257 | } |
| 1258 | |
| 1259 | val <<= 8; |
| 1260 | val |= (uint64_t)*pin; |
| 1261 | bits += 8; |
| 1262 | |
| 1263 | *ppin = pin; |
| 1264 | *pval = val; |
| 1265 | *pbits = bits; |
| 1266 | if (!elf_fetch_bits_backward (ppin, pinend, pval, pbits)) |
| 1267 | return 0; |
| 1268 | |
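| | /* STREAM_START, the last byte of the stream, consists (from the high |
| | bit down) of zero or more padding 0 bits followed by a 1 bit that |
| | marks the start of the stream. __builtin_clz (stream_start) minus |
| | (sizeof (unsigned int) - 1) * 8 is the number of those padding bits, |
| | and the extra 1 drops the marker bit itself, so the subtraction |
| | below discards everything up to and including the marker. */ |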
| 1269 | *pbits -= __builtin_clz (stream_start) - (sizeof (unsigned int) - 1) * 8 + 1; |
| 1270 | |
| 1271 | if (!elf_fetch_bits_backward (ppin, pinend, pval, pbits)) |
| 1272 | return 0; |
| 1273 | |
| 1274 | return 1; |
| 1275 | } |
| 1276 | |
| 1277 | /* Huffman code tables, like the rest of the zlib format, are defined |
| 1278 | by RFC 1951. We store a Huffman code table as a series of tables |
| 1279 | stored sequentially in memory. Each entry in a table is 16 bits. |
| 1280 | The first, main, table has 256 entries. It is followed by a set of |
| 1281 | secondary tables of length 2 to 128 entries. The maximum length of |
| 1282 | a code sequence in the deflate format is 15 bits, so that is all we |
| 1283 | need. Each secondary table has an index, which is the offset of |
| 1284 | the table in the overall memory storage. |
| 1285 | |
| 1286 | The deflate format says that all codes of a given bit length are |
| 1287 | lexicographically consecutive. Perhaps we could have 130 values |
| 1288 | that require a 15-bit code, perhaps requiring three secondary |
| 1289 | tables of size 128. I don't know if this is actually possible, but |
| 1290 | it suggests that the maximum size required for secondary tables is |
| 1291 | 3 * 128 + 3 * 64 ... == 768. The zlib enough program reports 660 |
| 1292 | as the maximum. We permit 768, since in addition to the 256 for |
| 1293 | the primary table, with two bytes per entry, and with the two |
| 1294 | tables we need, that gives us a page. |
| 1295 | |
| 1296 | A single table entry needs to store a value or (for the main table |
| 1297 | only) the index and size of a secondary table. Values range from 0 |
| 1298 | to 285, inclusive. Secondary table indexes, per above, range from |
| 1299 | 0 to 510. For a value we need to store the number of bits we need |
| 1300 | to determine that value (one value may appear multiple times in the |
| 1301 | table), which is 1 to 8. For a secondary table we need to store |
| 1302 | the number of bits used to index into the table, which is 1 to 7. |
| 1303 | And of course we need 1 bit to decide whether we have a value or a |
| 1304 | secondary table index. So each entry needs 9 bits for value/table |
| 1305 | index, 3 bits for size, 1 bit what it is. For simplicity we use 16 |
| 1306 | bits per entry. */ |
| 1307 | |
| 1308 | /* Number of entries we allocate for one code table. We get a page |
| 1309 | for the two code tables we need. */ |
| 1310 | |
| 1311 | #define ZLIB_HUFFMAN_TABLE_SIZE (1024) |
| 1312 | |
| 1313 | /* Bit masks and shifts for the values in the table. */ |
| 1314 | |
| 1315 | #define ZLIB_HUFFMAN_VALUE_MASK 0x01ff |
| 1316 | #define ZLIB_HUFFMAN_BITS_SHIFT 9 |
| 1317 | #define ZLIB_HUFFMAN_BITS_MASK 0x7 |
| 1318 | #define ZLIB_HUFFMAN_SECONDARY_SHIFT 12 |
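| | /* So a 16-bit table entry decodes as |
| | value or secondary table index: entry & ZLIB_HUFFMAN_VALUE_MASK |
| | bit-count field: (entry >> ZLIB_HUFFMAN_BITS_SHIFT) & ZLIB_HUFFMAN_BITS_MASK |
| | secondary-table flag: entry & (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT) |
| | (The bit-count field is stored with a small bias; see how TVAL is |
| | built in elf_zlib_inflate_table below.) */ |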
| 1319 | |
| 1320 | /* For working memory while inflating we need two code tables, we need |
| 1321 | an array of code lengths (max value 15, so we use unsigned char), |
| 1322 | and an array of unsigned shorts used while building a table. The |
| 1323 | latter two arrays must be large enough to hold the maximum number |
| 1324 | of code lengths, which RFC 1951 defines as 286 + 30. */ |
| 1325 | |
| 1326 | #define ZLIB_TABLE_SIZE \ |
| 1327 | (2 * ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t) \ |
| 1328 | + (286 + 30) * sizeof (uint16_t) \ |
| 1329 | + (286 + 30) * sizeof (unsigned char)) |
| 1330 | |
| 1331 | #define ZLIB_TABLE_CODELEN_OFFSET \ |
| 1332 | (2 * ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t) \ |
| 1333 | + (286 + 30) * sizeof (uint16_t)) |
| 1334 | |
| 1335 | #define ZLIB_TABLE_WORK_OFFSET \ |
| 1336 | (2 * ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t)) |
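| | /* The resulting layout of the work space is: the two Huffman tables of |
| | ZLIB_HUFFMAN_TABLE_SIZE entries each, then the (286 + 30)-entry |
| | uint16_t work array, then the (286 + 30) code length bytes. */ |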
| 1337 | |
| 1338 | #ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE |
| 1339 | |
| 1340 | /* Used by the main function that generates the fixed table to learn |
| 1341 | the table size. */ |
| 1342 | static size_t final_next_secondary; |
| 1343 | |
| 1344 | #endif |
| 1345 | |
| 1346 | /* Build a Huffman code table from an array of lengths in CODES of |
| 1347 | length CODES_LEN. The table is stored into *TABLE. ZDEBUG_TABLE |
| 1348 | is the same as for elf_zlib_inflate, used to find some work space. |
| 1349 | Returns 1 on success, 0 on error. */ |
| 1350 | |
| 1351 | static int |
| 1352 | elf_zlib_inflate_table (unsigned char *codes, size_t codes_len, |
| 1353 | uint16_t *zdebug_table, uint16_t *table) |
| 1354 | { |
| 1355 | uint16_t count[16]; |
| 1356 | uint16_t start[16]; |
| 1357 | uint16_t prev[16]; |
| 1358 | uint16_t firstcode[7]; |
| 1359 | uint16_t *next; |
| 1360 | size_t i; |
| 1361 | size_t j; |
| 1362 | unsigned int code; |
| 1363 | size_t next_secondary; |
| 1364 | |
| 1365 | /* Count the number of code of each length. Set NEXT[val] to be the |
| 1366 | next value after VAL with the same bit length. */ |
| 1367 | |
| 1368 | next = (uint16_t *) (((unsigned char *) zdebug_table) |
| 1369 | + ZLIB_TABLE_WORK_OFFSET); |
| 1370 | |
| 1371 | memset (&count[0], 0, 16 * sizeof (uint16_t)); |
| 1372 | for (i = 0; i < codes_len; ++i) |
| 1373 | { |
| 1374 | if (unlikely (codes[i] >= 16)) |
| 1375 | { |
| 1376 | elf_uncompress_failed (); |
| 1377 | return 0; |
| 1378 | } |
| 1379 | |
| 1380 | if (count[codes[i]] == 0) |
| 1381 | { |
| 1382 | start[codes[i]] = i; |
| 1383 | prev[codes[i]] = i; |
| 1384 | } |
| 1385 | else |
| 1386 | { |
| 1387 | next[prev[codes[i]]] = i; |
| 1388 | prev[codes[i]] = i; |
| 1389 | } |
| 1390 | |
| 1391 | ++count[codes[i]]; |
| 1392 | } |
| 1393 | |
| 1394 | /* For each length, fill in the table for the codes of that |
| 1395 | length. */ |
| 1396 | |
| 1397 | memset (table, 0, ZLIB_HUFFMAN_TABLE_SIZE * sizeof (uint16_t)); |
| 1398 | |
| 1399 | /* Handle the values that do not require a secondary table. */ |
| 1400 | |
| 1401 | code = 0; |
| 1402 | for (j = 1; j <= 8; ++j) |
| 1403 | { |
| 1404 | unsigned int jcnt; |
| 1405 | unsigned int val; |
| 1406 | |
| 1407 | jcnt = count[j]; |
| 1408 | if (jcnt == 0) |
| 1409 | continue; |
| 1410 | |
| 1411 | if (unlikely (jcnt > (1U << j))) |
| 1412 | { |
| 1413 | elf_uncompress_failed (); |
| 1414 | return 0; |
| 1415 | } |
| 1416 | |
| 1417 | /* There are JCNT values that have this length, the values |
| 1418 | starting from START[j] continuing through NEXT[VAL]. Those |
| 1419 | values are assigned consecutive values starting at CODE. */ |
| 1420 | |
| 1421 | val = start[j]; |
| 1422 | for (i = 0; i < jcnt; ++i) |
| 1423 | { |
| 1424 | uint16_t tval; |
| 1425 | size_t ind; |
| 1426 | unsigned int incr; |
| 1427 | |
| 1428 | /* In the compressed bit stream, the value VAL is encoded as |
| 1429 | J bits with the value C. */ |
| 1430 | |
| 1431 | if (unlikely ((val & ~ZLIB_HUFFMAN_VALUE_MASK) != 0)) |
| 1432 | { |
| 1433 | elf_uncompress_failed (); |
| 1434 | return 0; |
| 1435 | } |
| 1436 | |
| 1437 | tval = val | ((j - 1) << ZLIB_HUFFMAN_BITS_SHIFT); |
| 1438 | |
| 1439 | /* The table lookup uses 8 bits. If J is less than 8, we |
| 1440 | don't know what the other bits will be. We need to fill |
| 1441 | in all possibilities in the table. Since the Huffman |
| 1442 | code is unambiguous, those entries can't be used for any |
| 1443 | other code. */ |
| 1444 | |
| 1445 | for (ind = code; ind < 0x100; ind += 1 << j) |
| 1446 | { |
| 1447 | if (unlikely (table[ind] != 0)) |
| 1448 | { |
| 1449 | elf_uncompress_failed (); |
| 1450 | return 0; |
| 1451 | } |
| 1452 | table[ind] = tval; |
| 1453 | } |
| 1454 | |
| 1455 | /* Advance to the next value with this length. */ |
| 1456 | if (i + 1 < jcnt) |
| 1457 | val = next[val]; |
| 1458 | |
| 1459 | /* The Huffman codes are stored in the bitstream with the |
| 1460 | most significant bit first, as is required to make them |
| 1461 | unambiguous. The effect is that when we read them from |
| 1462 | the bitstream we see the bit sequence in reverse order: |
| 1463 | the most significant bit of the Huffman code is the least |
| 1464 | significant bit of the value we read from the bitstream. |
| 1465 | That means that to make our table lookups work, we need |
| 1466 | to reverse the bits of CODE. Since reversing bits is |
| 1467 | tedious and in general requires using a table, we instead |
| 1468 | increment CODE in reverse order. That is, if the number |
| 1469 | of bits we are currently using, here named J, is 3, we |
| 1470 | count as 000, 100, 010, 110, 001, 101, 011, 111, which is |
| 1471 | to say the numbers from 0 to 7 but with the bits |
| 1472 | reversed. Going to more bits, aka incrementing J, |
| 1473 | effectively just adds more zero bits at the beginning, |
| 1474 | and as such does not change the numeric value of CODE. |
| 1475 | |
| 1476 | To increment CODE of length J in reverse order, find the |
| 1477 | most significant zero bit and set it to one while |
| 1478 | clearing all higher bits. In other words, add 1 modulo |
| 1479 | 2^J, only reversed. */ |
| 1480 | |
| 1481 | incr = 1U << (j - 1); |
| 1482 | while ((code & incr) != 0) |
| 1483 | incr >>= 1; |
| 1484 | if (incr == 0) |
| 1485 | code = 0; |
| 1486 | else |
| 1487 | { |
| 1488 | code &= incr - 1; |
| 1489 | code += incr; |
| 1490 | } |
| 1491 | } |
| 1492 | } |
| 1493 | |
| 1494 | /* Handle the values that require a secondary table. */ |
| 1495 | |
| 1496 | /* Set FIRSTCODE, the number at which the codes start, for each |
| 1497 | length. */ |
| 1498 | |
| 1499 | for (j = 9; j < 16; j++) |
| 1500 | { |
| 1501 | unsigned int jcnt; |
| 1502 | unsigned int k; |
| 1503 | |
| 1504 | jcnt = count[j]; |
| 1505 | if (jcnt == 0) |
| 1506 | continue; |
| 1507 | |
| 1508 | /* There are JCNT values that have this length, the values |
| 1509 | starting from START[j]. Those values are assigned |
| 1510 | consecutive values starting at CODE. */ |
| 1511 | |
| 1512 | firstcode[j - 9] = code; |
| 1513 | |
| 1514 | /* Reverse add JCNT to CODE modulo 2^J. */ |
| 1515 | for (k = 0; k < j; ++k) |
| 1516 | { |
| 1517 | if ((jcnt & (1U << k)) != 0) |
| 1518 | { |
| 1519 | unsigned int m; |
| 1520 | unsigned int bit; |
| 1521 | |
| 1522 | bit = 1U << (j - k - 1); |
| 1523 | for (m = 0; m < j - k; ++m, bit >>= 1) |
| 1524 | { |
| 1525 | if ((code & bit) == 0) |
| 1526 | { |
| 1527 | code += bit; |
| 1528 | break; |
| 1529 | } |
| 1530 | code &= ~bit; |
| 1531 | } |
| 1532 | jcnt &= ~(1U << k); |
| 1533 | } |
| 1534 | } |
| 1535 | if (unlikely (jcnt != 0)) |
| 1536 | { |
| 1537 | elf_uncompress_failed (); |
| 1538 | return 0; |
| 1539 | } |
| 1540 | } |
| 1541 | |
| 1542 | /* For J from 9 to 15, inclusive, we store COUNT[J] consecutive |
| 1543 | values starting at START[J] with consecutive codes starting at |
| 1544 | FIRSTCODE[J - 9]. In the primary table we need to point to the |
| 1545 | secondary table, and the secondary table will be indexed by J - 9 |
| 1546 | bits. We count down from 15 so that we install the larger |
| 1547 | secondary tables first, as the smaller ones may be embedded in |
| 1548 | the larger ones. */ |
| 1549 | |
| 1550 | next_secondary = 0; /* Index of next secondary table (after primary). */ |
| 1551 | for (j = 15; j >= 9; j--) |
| 1552 | { |
| 1553 | unsigned int jcnt; |
| 1554 | unsigned int val; |
| 1555 | size_t primary; /* Current primary index. */ |
| 1556 | size_t secondary; /* Offset to current secondary table. */ |
| 1557 | size_t secondary_bits; /* Bit size of current secondary table. */ |
| 1558 | |
| 1559 | jcnt = count[j]; |
| 1560 | if (jcnt == 0) |
| 1561 | continue; |
| 1562 | |
| 1563 | val = start[j]; |
| 1564 | code = firstcode[j - 9]; |
| 1565 | primary = 0x100; |
| 1566 | secondary = 0; |
| 1567 | secondary_bits = 0; |
| 1568 | for (i = 0; i < jcnt; ++i) |
| 1569 | { |
| 1570 | uint16_t tval; |
| 1571 | size_t ind; |
| 1572 | unsigned int incr; |
| 1573 | |
| 1574 | if ((code & 0xff) != primary) |
| 1575 | { |
| 1576 | uint16_t tprimary; |
| 1577 | |
| 1578 | /* Fill in a new primary table entry. */ |
| 1579 | |
| 1580 | primary = code & 0xff; |
| 1581 | |
| 1582 | tprimary = table[primary]; |
| 1583 | if (tprimary == 0) |
| 1584 | { |
| 1585 | /* Start a new secondary table. */ |
| 1586 | |
| 1587 | if (unlikely ((next_secondary & ZLIB_HUFFMAN_VALUE_MASK) |
| 1588 | != next_secondary)) |
| 1589 | { |
| 1590 | elf_uncompress_failed (); |
| 1591 | return 0; |
| 1592 | } |
| 1593 | |
| 1594 | secondary = next_secondary; |
| 1595 | secondary_bits = j - 8; |
| 1596 | next_secondary += 1 << secondary_bits; |
| 1597 | table[primary] = (secondary |
| 1598 | + ((j - 8) << ZLIB_HUFFMAN_BITS_SHIFT) |
| 1599 | + (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT)); |
| 1600 | } |
| 1601 | else |
| 1602 | { |
| 1603 | /* There is an existing entry. It had better be a |
| 1604 | secondary table with enough bits. */ |
| 1605 | if (unlikely ((tprimary |
| 1606 | & (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT)) |
| 1607 | == 0)) |
| 1608 | { |
| 1609 | elf_uncompress_failed (); |
| 1610 | return 0; |
| 1611 | } |
| 1612 | secondary = tprimary & ZLIB_HUFFMAN_VALUE_MASK; |
| 1613 | secondary_bits = ((tprimary >> ZLIB_HUFFMAN_BITS_SHIFT) |
| 1614 | & ZLIB_HUFFMAN_BITS_MASK); |
| 1615 | if (unlikely (secondary_bits < j - 8)) |
| 1616 | { |
| 1617 | elf_uncompress_failed (); |
| 1618 | return 0; |
| 1619 | } |
| 1620 | } |
| 1621 | } |
| 1622 | |
| 1623 | /* Fill in secondary table entries. */ |
| 1624 | |
| 1625 | tval = val | ((j - 8) << ZLIB_HUFFMAN_BITS_SHIFT); |
| 1626 | |
| 1627 | for (ind = code >> 8; |
| 1628 | ind < (1U << secondary_bits); |
| 1629 | ind += 1U << (j - 8)) |
| 1630 | { |
| 1631 | if (unlikely (table[secondary + 0x100 + ind] != 0)) |
| 1632 | { |
| 1633 | elf_uncompress_failed (); |
| 1634 | return 0; |
| 1635 | } |
| 1636 | table[secondary + 0x100 + ind] = tval; |
| 1637 | } |
| 1638 | |
| 1639 | if (i + 1 < jcnt) |
| 1640 | val = next[val]; |
| 1641 | |
| 1642 | incr = 1U << (j - 1); |
| 1643 | while ((code & incr) != 0) |
| 1644 | incr >>= 1; |
| 1645 | if (incr == 0) |
| 1646 | code = 0; |
| 1647 | else |
| 1648 | { |
| 1649 | code &= incr - 1; |
| 1650 | code += incr; |
| 1651 | } |
| 1652 | } |
| 1653 | } |
| 1654 | |
| 1655 | #ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE |
| 1656 | final_next_secondary = next_secondary; |
| 1657 | #endif |
| 1658 | |
| 1659 | return 1; |
| 1660 | } |
| 1661 | |
| 1662 | #ifdef BACKTRACE_GENERATE_FIXED_HUFFMAN_TABLE |
| 1663 | |
| 1664 | /* Used to generate the fixed Huffman table for block type 1. */ |
| 1665 | |
| 1666 | #include <stdio.h> |
| 1667 | |
| 1668 | static uint16_t table[ZLIB_TABLE_SIZE]; |
| 1669 | static unsigned char codes[288]; |
| 1670 | |
| 1671 | int |
| 1672 | main () |
| 1673 | { |
| 1674 | size_t i; |
| 1675 | |
| 1676 | for (i = 0; i <= 143; ++i) |
| 1677 | codes[i] = 8; |
| 1678 | for (i = 144; i <= 255; ++i) |
| 1679 | codes[i] = 9; |
| 1680 | for (i = 256; i <= 279; ++i) |
| 1681 | codes[i] = 7; |
| 1682 | for (i = 280; i <= 287; ++i) |
| 1683 | codes[i] = 8; |
| 1684 | if (!elf_zlib_inflate_table (&codes[0], 288, &table[0], &table[0])) |
| 1685 | { |
fprintf (stderr, "elf_zlib_inflate_table failed\n");
| 1687 | exit (EXIT_FAILURE); |
| 1688 | } |
| 1689 | |
printf ("static const uint16_t elf_zlib_default_table[%#zx] =\n",
final_next_secondary + 0x100);
printf ("{\n");
for (i = 0; i < final_next_secondary + 0x100; i += 8)
{
size_t j;

printf (" ");
for (j = i; j < final_next_secondary + 0x100 && j < i + 8; ++j)
printf (" %#x,", table[j]);
printf ("\n");
}
printf ("};\n");
printf ("\n");
| 1704 | |
| 1705 | for (i = 0; i < 32; ++i) |
| 1706 | codes[i] = 5; |
| 1707 | if (!elf_zlib_inflate_table (&codes[0], 32, &table[0], &table[0])) |
| 1708 | { |
fprintf (stderr, "elf_zlib_inflate_table failed\n");
| 1710 | exit (EXIT_FAILURE); |
| 1711 | } |
| 1712 | |
printf ("static const uint16_t elf_zlib_default_dist_table[%#zx] =\n",
final_next_secondary + 0x100);
printf ("{\n");
for (i = 0; i < final_next_secondary + 0x100; i += 8)
{
size_t j;

printf (" ");
for (j = i; j < final_next_secondary + 0x100 && j < i + 8; ++j)
printf (" %#x,", table[j]);
printf ("\n");
}
printf ("};\n");
| 1726 | |
| 1727 | return 0; |
| 1728 | } |
| 1729 | |
| 1730 | #endif |
| 1731 | |
| 1732 | /* The fixed tables generated by the #ifdef'ed out main function |
| 1733 | above. */ |
| 1734 | |
| 1735 | static const uint16_t elf_zlib_default_table[0x170] = |
| 1736 | { |
| 1737 | 0xd00, 0xe50, 0xe10, 0xf18, 0xd10, 0xe70, 0xe30, 0x1230, |
| 1738 | 0xd08, 0xe60, 0xe20, 0x1210, 0xe00, 0xe80, 0xe40, 0x1250, |
| 1739 | 0xd04, 0xe58, 0xe18, 0x1200, 0xd14, 0xe78, 0xe38, 0x1240, |
| 1740 | 0xd0c, 0xe68, 0xe28, 0x1220, 0xe08, 0xe88, 0xe48, 0x1260, |
| 1741 | 0xd02, 0xe54, 0xe14, 0xf1c, 0xd12, 0xe74, 0xe34, 0x1238, |
| 1742 | 0xd0a, 0xe64, 0xe24, 0x1218, 0xe04, 0xe84, 0xe44, 0x1258, |
| 1743 | 0xd06, 0xe5c, 0xe1c, 0x1208, 0xd16, 0xe7c, 0xe3c, 0x1248, |
| 1744 | 0xd0e, 0xe6c, 0xe2c, 0x1228, 0xe0c, 0xe8c, 0xe4c, 0x1268, |
| 1745 | 0xd01, 0xe52, 0xe12, 0xf1a, 0xd11, 0xe72, 0xe32, 0x1234, |
| 1746 | 0xd09, 0xe62, 0xe22, 0x1214, 0xe02, 0xe82, 0xe42, 0x1254, |
| 1747 | 0xd05, 0xe5a, 0xe1a, 0x1204, 0xd15, 0xe7a, 0xe3a, 0x1244, |
| 1748 | 0xd0d, 0xe6a, 0xe2a, 0x1224, 0xe0a, 0xe8a, 0xe4a, 0x1264, |
| 1749 | 0xd03, 0xe56, 0xe16, 0xf1e, 0xd13, 0xe76, 0xe36, 0x123c, |
| 1750 | 0xd0b, 0xe66, 0xe26, 0x121c, 0xe06, 0xe86, 0xe46, 0x125c, |
| 1751 | 0xd07, 0xe5e, 0xe1e, 0x120c, 0xd17, 0xe7e, 0xe3e, 0x124c, |
| 1752 | 0xd0f, 0xe6e, 0xe2e, 0x122c, 0xe0e, 0xe8e, 0xe4e, 0x126c, |
| 1753 | 0xd00, 0xe51, 0xe11, 0xf19, 0xd10, 0xe71, 0xe31, 0x1232, |
| 1754 | 0xd08, 0xe61, 0xe21, 0x1212, 0xe01, 0xe81, 0xe41, 0x1252, |
| 1755 | 0xd04, 0xe59, 0xe19, 0x1202, 0xd14, 0xe79, 0xe39, 0x1242, |
| 1756 | 0xd0c, 0xe69, 0xe29, 0x1222, 0xe09, 0xe89, 0xe49, 0x1262, |
| 1757 | 0xd02, 0xe55, 0xe15, 0xf1d, 0xd12, 0xe75, 0xe35, 0x123a, |
| 1758 | 0xd0a, 0xe65, 0xe25, 0x121a, 0xe05, 0xe85, 0xe45, 0x125a, |
| 1759 | 0xd06, 0xe5d, 0xe1d, 0x120a, 0xd16, 0xe7d, 0xe3d, 0x124a, |
| 1760 | 0xd0e, 0xe6d, 0xe2d, 0x122a, 0xe0d, 0xe8d, 0xe4d, 0x126a, |
| 1761 | 0xd01, 0xe53, 0xe13, 0xf1b, 0xd11, 0xe73, 0xe33, 0x1236, |
| 1762 | 0xd09, 0xe63, 0xe23, 0x1216, 0xe03, 0xe83, 0xe43, 0x1256, |
| 1763 | 0xd05, 0xe5b, 0xe1b, 0x1206, 0xd15, 0xe7b, 0xe3b, 0x1246, |
| 1764 | 0xd0d, 0xe6b, 0xe2b, 0x1226, 0xe0b, 0xe8b, 0xe4b, 0x1266, |
| 1765 | 0xd03, 0xe57, 0xe17, 0xf1f, 0xd13, 0xe77, 0xe37, 0x123e, |
| 1766 | 0xd0b, 0xe67, 0xe27, 0x121e, 0xe07, 0xe87, 0xe47, 0x125e, |
| 1767 | 0xd07, 0xe5f, 0xe1f, 0x120e, 0xd17, 0xe7f, 0xe3f, 0x124e, |
| 1768 | 0xd0f, 0xe6f, 0xe2f, 0x122e, 0xe0f, 0xe8f, 0xe4f, 0x126e, |
| 1769 | 0x290, 0x291, 0x292, 0x293, 0x294, 0x295, 0x296, 0x297, |
| 1770 | 0x298, 0x299, 0x29a, 0x29b, 0x29c, 0x29d, 0x29e, 0x29f, |
| 1771 | 0x2a0, 0x2a1, 0x2a2, 0x2a3, 0x2a4, 0x2a5, 0x2a6, 0x2a7, |
| 1772 | 0x2a8, 0x2a9, 0x2aa, 0x2ab, 0x2ac, 0x2ad, 0x2ae, 0x2af, |
| 1773 | 0x2b0, 0x2b1, 0x2b2, 0x2b3, 0x2b4, 0x2b5, 0x2b6, 0x2b7, |
| 1774 | 0x2b8, 0x2b9, 0x2ba, 0x2bb, 0x2bc, 0x2bd, 0x2be, 0x2bf, |
| 1775 | 0x2c0, 0x2c1, 0x2c2, 0x2c3, 0x2c4, 0x2c5, 0x2c6, 0x2c7, |
| 1776 | 0x2c8, 0x2c9, 0x2ca, 0x2cb, 0x2cc, 0x2cd, 0x2ce, 0x2cf, |
| 1777 | 0x2d0, 0x2d1, 0x2d2, 0x2d3, 0x2d4, 0x2d5, 0x2d6, 0x2d7, |
| 1778 | 0x2d8, 0x2d9, 0x2da, 0x2db, 0x2dc, 0x2dd, 0x2de, 0x2df, |
| 1779 | 0x2e0, 0x2e1, 0x2e2, 0x2e3, 0x2e4, 0x2e5, 0x2e6, 0x2e7, |
| 1780 | 0x2e8, 0x2e9, 0x2ea, 0x2eb, 0x2ec, 0x2ed, 0x2ee, 0x2ef, |
| 1781 | 0x2f0, 0x2f1, 0x2f2, 0x2f3, 0x2f4, 0x2f5, 0x2f6, 0x2f7, |
| 1782 | 0x2f8, 0x2f9, 0x2fa, 0x2fb, 0x2fc, 0x2fd, 0x2fe, 0x2ff, |
| 1783 | }; |
| 1784 | |
| 1785 | static const uint16_t elf_zlib_default_dist_table[0x100] = |
| 1786 | { |
| 1787 | 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, |
| 1788 | 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, |
| 1789 | 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, |
| 1790 | 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, |
| 1791 | 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, |
| 1792 | 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, |
| 1793 | 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, |
| 1794 | 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, |
| 1795 | 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, |
| 1796 | 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, |
| 1797 | 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, |
| 1798 | 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, |
| 1799 | 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, |
| 1800 | 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, |
| 1801 | 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, |
| 1802 | 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, |
| 1803 | 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, |
| 1804 | 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, |
| 1805 | 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, |
| 1806 | 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, |
| 1807 | 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, |
| 1808 | 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, |
| 1809 | 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, |
| 1810 | 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, |
| 1811 | 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, |
| 1812 | 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, |
| 1813 | 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, |
| 1814 | 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, |
| 1815 | 0x800, 0x810, 0x808, 0x818, 0x804, 0x814, 0x80c, 0x81c, |
| 1816 | 0x802, 0x812, 0x80a, 0x81a, 0x806, 0x816, 0x80e, 0x81e, |
| 1817 | 0x801, 0x811, 0x809, 0x819, 0x805, 0x815, 0x80d, 0x81d, |
| 1818 | 0x803, 0x813, 0x80b, 0x81b, 0x807, 0x817, 0x80f, 0x81f, |
| 1819 | }; |
| 1820 | |
| 1821 | /* Inflate a zlib stream from PIN/SIN to POUT/SOUT. Return 1 on |
| 1822 | success, 0 on some error parsing the stream. */ |
| 1823 | |
| 1824 | static int |
| 1825 | elf_zlib_inflate (const unsigned char *pin, size_t sin, uint16_t *zdebug_table, |
| 1826 | unsigned char *pout, size_t sout) |
| 1827 | { |
| 1828 | unsigned char *porigout; |
| 1829 | const unsigned char *pinend; |
| 1830 | unsigned char *poutend; |
| 1831 | |
| 1832 | /* We can apparently see multiple zlib streams concatenated |
| 1833 | together, so keep going as long as there is something to read. |
| 1834 | The last 4 bytes are the checksum. */ |
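/* The trailing 4 bytes are the Adler-32 checksum of the uncompressed
data, stored most significant byte first; it is checked separately
by elf_zlib_verify_checksum, not here. */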
| 1835 | porigout = pout; |
| 1836 | pinend = pin + sin; |
| 1837 | poutend = pout + sout; |
| 1838 | while ((pinend - pin) > 4) |
| 1839 | { |
| 1840 | uint64_t val; |
| 1841 | unsigned int bits; |
| 1842 | int last; |
| 1843 | |
| 1844 | /* Read the two byte zlib header. */ |
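/* Per RFC 1950: the low 4 bits of the first byte (CMF) are the
compression method, which must be 8 for deflate, and the high 4 bits
are the base-2 log of the window size minus 8. In the second byte
(FLG), bit 0x20 is FDICT, and CMF * 256 + FLG must be a multiple of
31. */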
| 1845 | |
| 1846 | if (unlikely ((pin[0] & 0xf) != 8)) /* 8 is zlib encoding. */ |
| 1847 | { |
| 1848 | /* Unknown compression method. */ |
| 1849 | elf_uncompress_failed (); |
| 1850 | return 0; |
| 1851 | } |
| 1852 | if (unlikely ((pin[0] >> 4) > 7)) |
| 1853 | { |
| 1854 | /* Window size too large. Other than this check, we don't |
| 1855 | care about the window size. */ |
| 1856 | elf_uncompress_failed (); |
| 1857 | return 0; |
| 1858 | } |
| 1859 | if (unlikely ((pin[1] & 0x20) != 0)) |
| 1860 | { |
| 1861 | /* Stream expects a predefined dictionary, but we have no |
| 1862 | dictionary. */ |
| 1863 | elf_uncompress_failed (); |
| 1864 | return 0; |
| 1865 | } |
| 1866 | val = (pin[0] << 8) | pin[1]; |
| 1867 | if (unlikely (val % 31 != 0)) |
| 1868 | { |
| 1869 | /* Header check failure. */ |
| 1870 | elf_uncompress_failed (); |
| 1871 | return 0; |
| 1872 | } |
| 1873 | pin += 2; |
| 1874 | |
| 1875 | /* Align PIN to a 32-bit boundary. */ |
| 1876 | |
| 1877 | val = 0; |
| 1878 | bits = 0; |
| 1879 | while ((((uintptr_t) pin) & 3) != 0) |
| 1880 | { |
| 1881 | val |= (uint64_t)*pin << bits; |
| 1882 | bits += 8; |
| 1883 | ++pin; |
| 1884 | } |
| 1885 | |
| 1886 | /* Read blocks until one is marked last. */ |
| 1887 | |
| 1888 | last = 0; |
| 1889 | |
| 1890 | while (!last) |
| 1891 | { |
| 1892 | unsigned int type; |
| 1893 | const uint16_t *tlit; |
| 1894 | const uint16_t *tdist; |
| 1895 | |
if (!elf_fetch_bits (&pin, pinend, &val, &bits))
| 1897 | return 0; |
| 1898 | |
| 1899 | last = val & 1; |
| 1900 | type = (val >> 1) & 3; |
| 1901 | val >>= 3; |
| 1902 | bits -= 3; |
| 1903 | |
| 1904 | if (unlikely (type == 3)) |
| 1905 | { |
| 1906 | /* Invalid block type. */ |
| 1907 | elf_uncompress_failed (); |
| 1908 | return 0; |
| 1909 | } |
| 1910 | |
| 1911 | if (type == 0) |
| 1912 | { |
| 1913 | uint16_t len; |
| 1914 | uint16_t lenc; |
| 1915 | |
| 1916 | /* An uncompressed block. */ |
| 1917 | |
| 1918 | /* If we've read ahead more than a byte, back up. */ |
| 1919 | while (bits >= 8) |
| 1920 | { |
| 1921 | --pin; |
| 1922 | bits -= 8; |
| 1923 | } |
| 1924 | |
| 1925 | val = 0; |
| 1926 | bits = 0; |
| 1927 | if (unlikely ((pinend - pin) < 4)) |
| 1928 | { |
| 1929 | /* Missing length. */ |
| 1930 | elf_uncompress_failed (); |
| 1931 | return 0; |
| 1932 | } |
| 1933 | len = pin[0] | (pin[1] << 8); |
| 1934 | lenc = pin[2] | (pin[3] << 8); |
| 1935 | pin += 4; |
| 1936 | lenc = ~lenc; |
| 1937 | if (unlikely (len != lenc)) |
| 1938 | { |
| 1939 | /* Corrupt data. */ |
| 1940 | elf_uncompress_failed (); |
| 1941 | return 0; |
| 1942 | } |
| 1943 | if (unlikely (len > (unsigned int) (pinend - pin) |
| 1944 | || len > (unsigned int) (poutend - pout))) |
| 1945 | { |
| 1946 | /* Not enough space in buffers. */ |
| 1947 | elf_uncompress_failed (); |
| 1948 | return 0; |
| 1949 | } |
memcpy (pout, pin, len);
| 1951 | pout += len; |
| 1952 | pin += len; |
| 1953 | |
| 1954 | /* Align PIN. */ |
| 1955 | while ((((uintptr_t) pin) & 3) != 0) |
| 1956 | { |
| 1957 | val |= (uint64_t)*pin << bits; |
| 1958 | bits += 8; |
| 1959 | ++pin; |
| 1960 | } |
| 1961 | |
| 1962 | /* Go around to read the next block. */ |
| 1963 | continue; |
| 1964 | } |
| 1965 | |
| 1966 | if (type == 1) |
| 1967 | { |
| 1968 | tlit = elf_zlib_default_table; |
| 1969 | tdist = elf_zlib_default_dist_table; |
| 1970 | } |
| 1971 | else |
| 1972 | { |
| 1973 | unsigned int nlit; |
| 1974 | unsigned int ndist; |
| 1975 | unsigned int nclen; |
| 1976 | unsigned char codebits[19]; |
| 1977 | unsigned char *plenbase; |
| 1978 | unsigned char *plen; |
| 1979 | unsigned char *plenend; |
| 1980 | |
| 1981 | /* Read a Huffman encoding table. The various magic |
| 1982 | numbers here are from RFC 1951. */ |
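/* The block header holds 5 bits of HLIT (literal/length code count
minus 257), 5 bits of HDIST (distance code count minus 1), and 4
bits of HCLEN (code length code count minus 4). The HCLEN 3-bit
code lengths that follow are stored in the fixed order 16 17 18 0 8
7 9 6 10 5 11 4 12 3 13 2 14 1 15, which is why CODEBITS is filled
in below in that scattered order. */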
| 1983 | |
if (!elf_fetch_bits (&pin, pinend, &val, &bits))
| 1985 | return 0; |
| 1986 | |
| 1987 | nlit = (val & 0x1f) + 257; |
| 1988 | val >>= 5; |
| 1989 | ndist = (val & 0x1f) + 1; |
| 1990 | val >>= 5; |
| 1991 | nclen = (val & 0xf) + 4; |
| 1992 | val >>= 4; |
| 1993 | bits -= 14; |
| 1994 | if (unlikely (nlit > 286 || ndist > 30)) |
| 1995 | { |
| 1996 | /* Values out of range. */ |
| 1997 | elf_uncompress_failed (); |
| 1998 | return 0; |
| 1999 | } |
| 2000 | |
/* Read and build the Huffman table that was used to compress
the bit lengths of the literal/length and distance codes. */
| 2003 | |
memset (&codebits[0], 0, 19);
| 2005 | |
| 2006 | /* There are always at least 4 elements in the |
| 2007 | table. */ |
| 2008 | |
if (!elf_fetch_bits (&pin, pinend, &val, &bits))
| 2010 | return 0; |
| 2011 | |
| 2012 | codebits[16] = val & 7; |
| 2013 | codebits[17] = (val >> 3) & 7; |
| 2014 | codebits[18] = (val >> 6) & 7; |
| 2015 | codebits[0] = (val >> 9) & 7; |
| 2016 | val >>= 12; |
| 2017 | bits -= 12; |
| 2018 | |
| 2019 | if (nclen == 4) |
| 2020 | goto codebitsdone; |
| 2021 | |
| 2022 | codebits[8] = val & 7; |
| 2023 | val >>= 3; |
| 2024 | bits -= 3; |
| 2025 | |
| 2026 | if (nclen == 5) |
| 2027 | goto codebitsdone; |
| 2028 | |
if (!elf_fetch_bits (&pin, pinend, &val, &bits))
| 2030 | return 0; |
| 2031 | |
| 2032 | codebits[7] = val & 7; |
| 2033 | val >>= 3; |
| 2034 | bits -= 3; |
| 2035 | |
| 2036 | if (nclen == 6) |
| 2037 | goto codebitsdone; |
| 2038 | |
| 2039 | codebits[9] = val & 7; |
| 2040 | val >>= 3; |
| 2041 | bits -= 3; |
| 2042 | |
| 2043 | if (nclen == 7) |
| 2044 | goto codebitsdone; |
| 2045 | |
| 2046 | codebits[6] = val & 7; |
| 2047 | val >>= 3; |
| 2048 | bits -= 3; |
| 2049 | |
| 2050 | if (nclen == 8) |
| 2051 | goto codebitsdone; |
| 2052 | |
| 2053 | codebits[10] = val & 7; |
| 2054 | val >>= 3; |
| 2055 | bits -= 3; |
| 2056 | |
| 2057 | if (nclen == 9) |
| 2058 | goto codebitsdone; |
| 2059 | |
| 2060 | codebits[5] = val & 7; |
| 2061 | val >>= 3; |
| 2062 | bits -= 3; |
| 2063 | |
| 2064 | if (nclen == 10) |
| 2065 | goto codebitsdone; |
| 2066 | |
if (!elf_fetch_bits (&pin, pinend, &val, &bits))
| 2068 | return 0; |
| 2069 | |
| 2070 | codebits[11] = val & 7; |
| 2071 | val >>= 3; |
| 2072 | bits -= 3; |
| 2073 | |
| 2074 | if (nclen == 11) |
| 2075 | goto codebitsdone; |
| 2076 | |
| 2077 | codebits[4] = val & 7; |
| 2078 | val >>= 3; |
| 2079 | bits -= 3; |
| 2080 | |
| 2081 | if (nclen == 12) |
| 2082 | goto codebitsdone; |
| 2083 | |
| 2084 | codebits[12] = val & 7; |
| 2085 | val >>= 3; |
| 2086 | bits -= 3; |
| 2087 | |
| 2088 | if (nclen == 13) |
| 2089 | goto codebitsdone; |
| 2090 | |
| 2091 | codebits[3] = val & 7; |
| 2092 | val >>= 3; |
| 2093 | bits -= 3; |
| 2094 | |
| 2095 | if (nclen == 14) |
| 2096 | goto codebitsdone; |
| 2097 | |
| 2098 | codebits[13] = val & 7; |
| 2099 | val >>= 3; |
| 2100 | bits -= 3; |
| 2101 | |
| 2102 | if (nclen == 15) |
| 2103 | goto codebitsdone; |
| 2104 | |
if (!elf_fetch_bits (&pin, pinend, &val, &bits))
| 2106 | return 0; |
| 2107 | |
| 2108 | codebits[2] = val & 7; |
| 2109 | val >>= 3; |
| 2110 | bits -= 3; |
| 2111 | |
| 2112 | if (nclen == 16) |
| 2113 | goto codebitsdone; |
| 2114 | |
| 2115 | codebits[14] = val & 7; |
| 2116 | val >>= 3; |
| 2117 | bits -= 3; |
| 2118 | |
| 2119 | if (nclen == 17) |
| 2120 | goto codebitsdone; |
| 2121 | |
| 2122 | codebits[1] = val & 7; |
| 2123 | val >>= 3; |
| 2124 | bits -= 3; |
| 2125 | |
| 2126 | if (nclen == 18) |
| 2127 | goto codebitsdone; |
| 2128 | |
| 2129 | codebits[15] = val & 7; |
| 2130 | val >>= 3; |
| 2131 | bits -= 3; |
| 2132 | |
| 2133 | codebitsdone: |
| 2134 | |
if (!elf_zlib_inflate_table (codebits, 19, zdebug_table,
zdebug_table))
| 2137 | return 0; |
| 2138 | |
| 2139 | /* Read the compressed bit lengths of the literal, |
| 2140 | length, and distance codes. We have allocated space |
| 2141 | at the end of zdebug_table to hold them. */ |
| 2142 | |
| 2143 | plenbase = (((unsigned char *) zdebug_table) |
| 2144 | + ZLIB_TABLE_CODELEN_OFFSET); |
| 2145 | plen = plenbase; |
| 2146 | plenend = plen + nlit + ndist; |
| 2147 | while (plen < plenend) |
| 2148 | { |
| 2149 | uint16_t t; |
| 2150 | unsigned int b; |
| 2151 | uint16_t v; |
| 2152 | |
if (!elf_fetch_bits (&pin, pinend, &val, &bits))
| 2154 | return 0; |
| 2155 | |
| 2156 | t = zdebug_table[val & 0xff]; |
| 2157 | |
| 2158 | /* The compression here uses bit lengths up to 7, so |
| 2159 | a secondary table is never necessary. */ |
| 2160 | if (unlikely ((t & (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT)) |
| 2161 | != 0)) |
| 2162 | { |
| 2163 | elf_uncompress_failed (); |
| 2164 | return 0; |
| 2165 | } |
| 2166 | |
| 2167 | b = (t >> ZLIB_HUFFMAN_BITS_SHIFT) & ZLIB_HUFFMAN_BITS_MASK; |
| 2168 | val >>= b + 1; |
| 2169 | bits -= b + 1; |
| 2170 | |
| 2171 | v = t & ZLIB_HUFFMAN_VALUE_MASK; |
| 2172 | if (v < 16) |
| 2173 | *plen++ = v; |
| 2174 | else if (v == 16) |
| 2175 | { |
| 2176 | unsigned int c; |
| 2177 | unsigned int prev; |
| 2178 | |
| 2179 | /* Copy previous entry 3 to 6 times. */ |
| 2180 | |
| 2181 | if (unlikely (plen == plenbase)) |
| 2182 | { |
| 2183 | elf_uncompress_failed (); |
| 2184 | return 0; |
| 2185 | } |
| 2186 | |
| 2187 | /* We used up to 7 bits since the last |
| 2188 | elf_fetch_bits, so we have at least 8 bits |
| 2189 | available here. */ |
| 2190 | |
| 2191 | c = 3 + (val & 0x3); |
| 2192 | val >>= 2; |
| 2193 | bits -= 2; |
| 2194 | if (unlikely ((unsigned int) (plenend - plen) < c)) |
| 2195 | { |
| 2196 | elf_uncompress_failed (); |
| 2197 | return 0; |
| 2198 | } |
| 2199 | |
| 2200 | prev = plen[-1]; |
| 2201 | switch (c) |
| 2202 | { |
| 2203 | case 6: |
| 2204 | *plen++ = prev; |
| 2205 | ATTRIBUTE_FALLTHROUGH; |
| 2206 | case 5: |
| 2207 | *plen++ = prev; |
| 2208 | ATTRIBUTE_FALLTHROUGH; |
| 2209 | case 4: |
| 2210 | *plen++ = prev; |
| 2211 | } |
| 2212 | *plen++ = prev; |
| 2213 | *plen++ = prev; |
| 2214 | *plen++ = prev; |
| 2215 | } |
| 2216 | else if (v == 17) |
| 2217 | { |
| 2218 | unsigned int c; |
| 2219 | |
| 2220 | /* Store zero 3 to 10 times. */ |
| 2221 | |
| 2222 | /* We used up to 7 bits since the last |
| 2223 | elf_fetch_bits, so we have at least 8 bits |
| 2224 | available here. */ |
| 2225 | |
| 2226 | c = 3 + (val & 0x7); |
| 2227 | val >>= 3; |
| 2228 | bits -= 3; |
| 2229 | if (unlikely ((unsigned int) (plenend - plen) < c)) |
| 2230 | { |
| 2231 | elf_uncompress_failed (); |
| 2232 | return 0; |
| 2233 | } |
| 2234 | |
| 2235 | switch (c) |
| 2236 | { |
| 2237 | case 10: |
| 2238 | *plen++ = 0; |
| 2239 | ATTRIBUTE_FALLTHROUGH; |
| 2240 | case 9: |
| 2241 | *plen++ = 0; |
| 2242 | ATTRIBUTE_FALLTHROUGH; |
| 2243 | case 8: |
| 2244 | *plen++ = 0; |
| 2245 | ATTRIBUTE_FALLTHROUGH; |
| 2246 | case 7: |
| 2247 | *plen++ = 0; |
| 2248 | ATTRIBUTE_FALLTHROUGH; |
| 2249 | case 6: |
| 2250 | *plen++ = 0; |
| 2251 | ATTRIBUTE_FALLTHROUGH; |
| 2252 | case 5: |
| 2253 | *plen++ = 0; |
| 2254 | ATTRIBUTE_FALLTHROUGH; |
| 2255 | case 4: |
| 2256 | *plen++ = 0; |
| 2257 | } |
| 2258 | *plen++ = 0; |
| 2259 | *plen++ = 0; |
| 2260 | *plen++ = 0; |
| 2261 | } |
| 2262 | else if (v == 18) |
| 2263 | { |
| 2264 | unsigned int c; |
| 2265 | |
| 2266 | /* Store zero 11 to 138 times. */ |
| 2267 | |
| 2268 | /* We used up to 7 bits since the last |
| 2269 | elf_fetch_bits, so we have at least 8 bits |
| 2270 | available here. */ |
| 2271 | |
| 2272 | c = 11 + (val & 0x7f); |
| 2273 | val >>= 7; |
| 2274 | bits -= 7; |
| 2275 | if (unlikely ((unsigned int) (plenend - plen) < c)) |
| 2276 | { |
| 2277 | elf_uncompress_failed (); |
| 2278 | return 0; |
| 2279 | } |
| 2280 | |
memset (plen, 0, c);
| 2282 | plen += c; |
| 2283 | } |
| 2284 | else |
| 2285 | { |
| 2286 | elf_uncompress_failed (); |
| 2287 | return 0; |
| 2288 | } |
| 2289 | } |
| 2290 | |
| 2291 | /* Make sure that the stop code can appear. */ |
| 2292 | |
| 2293 | plen = plenbase; |
| 2294 | if (unlikely (plen[256] == 0)) |
| 2295 | { |
| 2296 | elf_uncompress_failed (); |
| 2297 | return 0; |
| 2298 | } |
| 2299 | |
| 2300 | /* Build the decompression tables. */ |
| 2301 | |
if (!elf_zlib_inflate_table (plen, nlit, zdebug_table, zdebug_table))
return 0;
if (!elf_zlib_inflate_table (plen + nlit, ndist, zdebug_table,
zdebug_table + ZLIB_HUFFMAN_TABLE_SIZE))
| 2308 | return 0; |
| 2309 | tlit = zdebug_table; |
| 2310 | tdist = zdebug_table + ZLIB_HUFFMAN_TABLE_SIZE; |
| 2311 | } |
| 2312 | |
| 2313 | /* Inflate values until the end of the block. This is the |
| 2314 | main loop of the inflation code. */ |
| 2315 | |
| 2316 | while (1) |
| 2317 | { |
| 2318 | uint16_t t; |
| 2319 | unsigned int b; |
| 2320 | uint16_t v; |
| 2321 | unsigned int lit; |
| 2322 | |
if (!elf_fetch_bits (&pin, pinend, &val, &bits))
| 2324 | return 0; |
| 2325 | |
| 2326 | t = tlit[val & 0xff]; |
| 2327 | b = (t >> ZLIB_HUFFMAN_BITS_SHIFT) & ZLIB_HUFFMAN_BITS_MASK; |
| 2328 | v = t & ZLIB_HUFFMAN_VALUE_MASK; |
| 2329 | |
| 2330 | if ((t & (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT)) == 0) |
| 2331 | { |
| 2332 | lit = v; |
| 2333 | val >>= b + 1; |
| 2334 | bits -= b + 1; |
| 2335 | } |
| 2336 | else |
| 2337 | { |
| 2338 | t = tlit[v + 0x100 + ((val >> 8) & ((1U << b) - 1))]; |
| 2339 | b = (t >> ZLIB_HUFFMAN_BITS_SHIFT) & ZLIB_HUFFMAN_BITS_MASK; |
| 2340 | lit = t & ZLIB_HUFFMAN_VALUE_MASK; |
| 2341 | val >>= b + 8; |
| 2342 | bits -= b + 8; |
| 2343 | } |
| 2344 | |
| 2345 | if (lit < 256) |
| 2346 | { |
| 2347 | if (unlikely (pout == poutend)) |
| 2348 | { |
| 2349 | elf_uncompress_failed (); |
| 2350 | return 0; |
| 2351 | } |
| 2352 | |
| 2353 | *pout++ = lit; |
| 2354 | |
| 2355 | /* We will need to write the next byte soon. We ask |
| 2356 | for high temporal locality because we will write |
| 2357 | to the whole cache line soon. */ |
| 2358 | __builtin_prefetch (pout, 1, 3); |
| 2359 | } |
| 2360 | else if (lit == 256) |
| 2361 | { |
| 2362 | /* The end of the block. */ |
| 2363 | break; |
| 2364 | } |
| 2365 | else |
| 2366 | { |
| 2367 | unsigned int dist; |
| 2368 | unsigned int len; |
| 2369 | |
| 2370 | /* Convert lit into a length. */ |
| 2371 | |
| 2372 | if (lit < 265) |
| 2373 | len = lit - 257 + 3; |
| 2374 | else if (lit == 285) |
| 2375 | len = 258; |
| 2376 | else if (unlikely (lit > 285)) |
| 2377 | { |
| 2378 | elf_uncompress_failed (); |
| 2379 | return 0; |
| 2380 | } |
| 2381 | else |
| 2382 | { |
unsigned int extra;
| 2384 | |
if (!elf_fetch_bits (&pin, pinend, &val, &bits))
| 2386 | return 0; |
| 2387 | |
| 2388 | /* This is an expression for the table of length |
| 2389 | codes in RFC 1951 3.2.5. */ |
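/* Worked example: code 267 gives lit == 2 after the subtraction,
so extra == 1 and a base length of 15 (lengths 15-16); code 284
gives lit == 19, so extra == 5 and a base length of 227, matching
the RFC 1951 length table. */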
| 2390 | lit -= 265; |
| 2391 | extra = (lit >> 2) + 1; |
| 2392 | len = (lit & 3) << extra; |
| 2393 | len += 11; |
| 2394 | len += ((1U << (extra - 1)) - 1) << 3; |
| 2395 | len += val & ((1U << extra) - 1); |
| 2396 | val >>= extra; |
| 2397 | bits -= extra; |
| 2398 | } |
| 2399 | |
if (!elf_fetch_bits (&pin, pinend, &val, &bits))
| 2401 | return 0; |
| 2402 | |
| 2403 | t = tdist[val & 0xff]; |
| 2404 | b = (t >> ZLIB_HUFFMAN_BITS_SHIFT) & ZLIB_HUFFMAN_BITS_MASK; |
| 2405 | v = t & ZLIB_HUFFMAN_VALUE_MASK; |
| 2406 | |
| 2407 | if ((t & (1U << ZLIB_HUFFMAN_SECONDARY_SHIFT)) == 0) |
| 2408 | { |
| 2409 | dist = v; |
| 2410 | val >>= b + 1; |
| 2411 | bits -= b + 1; |
| 2412 | } |
| 2413 | else |
| 2414 | { |
| 2415 | t = tdist[v + 0x100 + ((val >> 8) & ((1U << b) - 1))]; |
| 2416 | b = ((t >> ZLIB_HUFFMAN_BITS_SHIFT) |
| 2417 | & ZLIB_HUFFMAN_BITS_MASK); |
| 2418 | dist = t & ZLIB_HUFFMAN_VALUE_MASK; |
| 2419 | val >>= b + 8; |
| 2420 | bits -= b + 8; |
| 2421 | } |
| 2422 | |
| 2423 | /* Convert dist to a distance. */ |
| 2424 | |
| 2425 | if (dist == 0) |
| 2426 | { |
| 2427 | /* A distance of 1. A common case, meaning |
| 2428 | repeat the last character LEN times. */ |
| 2429 | |
| 2430 | if (unlikely (pout == porigout)) |
| 2431 | { |
| 2432 | elf_uncompress_failed (); |
| 2433 | return 0; |
| 2434 | } |
| 2435 | |
| 2436 | if (unlikely ((unsigned int) (poutend - pout) < len)) |
| 2437 | { |
| 2438 | elf_uncompress_failed (); |
| 2439 | return 0; |
| 2440 | } |
| 2441 | |
memset (pout, pout[-1], len);
| 2443 | pout += len; |
| 2444 | } |
| 2445 | else if (unlikely (dist > 29)) |
| 2446 | { |
| 2447 | elf_uncompress_failed (); |
| 2448 | return 0; |
| 2449 | } |
| 2450 | else |
| 2451 | { |
| 2452 | if (dist < 4) |
| 2453 | dist = dist + 1; |
| 2454 | else |
| 2455 | { |
unsigned int extra;
| 2457 | |
if (!elf_fetch_bits (&pin, pinend, &val, &bits))
| 2459 | return 0; |
| 2460 | |
| 2461 | /* This is an expression for the table of |
| 2462 | distance codes in RFC 1951 3.2.5. */ |
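/* Worked example: code 6 gives dist == 2 after the subtraction,
so extra == 2 and a base distance of 9 (distances 9-12); code 29
gives dist == 25, so extra == 13 and a base distance of 24577,
matching the RFC 1951 distance table. */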
| 2463 | dist -= 4; |
| 2464 | extra = (dist >> 1) + 1; |
| 2465 | dist = (dist & 1) << extra; |
| 2466 | dist += 5; |
| 2467 | dist += ((1U << (extra - 1)) - 1) << 2; |
| 2468 | dist += val & ((1U << extra) - 1); |
| 2469 | val >>= extra; |
| 2470 | bits -= extra; |
| 2471 | } |
| 2472 | |
| 2473 | /* Go back dist bytes, and copy len bytes from |
| 2474 | there. */ |
| 2475 | |
| 2476 | if (unlikely ((unsigned int) (pout - porigout) < dist)) |
| 2477 | { |
| 2478 | elf_uncompress_failed (); |
| 2479 | return 0; |
| 2480 | } |
| 2481 | |
| 2482 | if (unlikely ((unsigned int) (poutend - pout) < len)) |
| 2483 | { |
| 2484 | elf_uncompress_failed (); |
| 2485 | return 0; |
| 2486 | } |
| 2487 | |
| 2488 | if (dist >= len) |
| 2489 | { |
memcpy (pout, pout - dist, len);
| 2491 | pout += len; |
| 2492 | } |
| 2493 | else |
| 2494 | { |
| 2495 | while (len > 0) |
| 2496 | { |
| 2497 | unsigned int copy; |
| 2498 | |
| 2499 | copy = len < dist ? len : dist; |
memcpy (pout, pout - dist, copy);
| 2501 | len -= copy; |
| 2502 | pout += copy; |
| 2503 | } |
| 2504 | } |
| 2505 | } |
| 2506 | } |
| 2507 | } |
| 2508 | } |
| 2509 | } |
| 2510 | |
| 2511 | /* We should have filled the output buffer. */ |
| 2512 | if (unlikely (pout != poutend)) |
| 2513 | { |
| 2514 | elf_uncompress_failed (); |
| 2515 | return 0; |
| 2516 | } |
| 2517 | |
| 2518 | return 1; |
| 2519 | } |
| 2520 | |
| 2521 | /* Verify the zlib checksum. The checksum is in the 4 bytes at |
| 2522 | CHECKBYTES, and the uncompressed data is at UNCOMPRESSED / |
| 2523 | UNCOMPRESSED_SIZE. Returns 1 on success, 0 on failure. */ |
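/* This is the Adler-32 checksum defined in RFC 1950: S1 is 1 plus the
sum of the bytes and S2 is the sum of the successive S1 values, both
modulo 65521; the stored checksum is (S2 << 16) | S1, written most
significant byte first. */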
| 2524 | |
| 2525 | static int |
| 2526 | elf_zlib_verify_checksum (const unsigned char *checkbytes, |
| 2527 | const unsigned char *uncompressed, |
| 2528 | size_t uncompressed_size) |
| 2529 | { |
| 2530 | unsigned int i; |
| 2531 | unsigned int cksum; |
| 2532 | const unsigned char *p; |
| 2533 | uint32_t s1; |
| 2534 | uint32_t s2; |
| 2535 | size_t hsz; |
| 2536 | |
| 2537 | cksum = 0; |
| 2538 | for (i = 0; i < 4; i++) |
| 2539 | cksum = (cksum << 8) | checkbytes[i]; |
| 2540 | |
| 2541 | s1 = 1; |
| 2542 | s2 = 0; |
| 2543 | |
| 2544 | /* Minimize modulo operations. */ |
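/* 5552 is the largest number of bytes that can be added before S2 can
overflow an unsigned 32-bit accumulator: 255 * n * (n + 1) / 2
+ (n + 1) * 65520 must stay below 2^32. Using that chunk size means
one pair of modulo operations per 5552 bytes instead of per byte. */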
| 2545 | |
| 2546 | p = uncompressed; |
| 2547 | hsz = uncompressed_size; |
| 2548 | while (hsz >= 5552) |
| 2549 | { |
| 2550 | for (i = 0; i < 5552; i += 16) |
| 2551 | { |
| 2552 | /* Manually unroll loop 16 times. */ |
| 2553 | s1 = s1 + *p++; |
| 2554 | s2 = s2 + s1; |
| 2555 | s1 = s1 + *p++; |
| 2556 | s2 = s2 + s1; |
| 2557 | s1 = s1 + *p++; |
| 2558 | s2 = s2 + s1; |
| 2559 | s1 = s1 + *p++; |
| 2560 | s2 = s2 + s1; |
| 2561 | s1 = s1 + *p++; |
| 2562 | s2 = s2 + s1; |
| 2563 | s1 = s1 + *p++; |
| 2564 | s2 = s2 + s1; |
| 2565 | s1 = s1 + *p++; |
| 2566 | s2 = s2 + s1; |
| 2567 | s1 = s1 + *p++; |
| 2568 | s2 = s2 + s1; |
| 2569 | s1 = s1 + *p++; |
| 2570 | s2 = s2 + s1; |
| 2571 | s1 = s1 + *p++; |
| 2572 | s2 = s2 + s1; |
| 2573 | s1 = s1 + *p++; |
| 2574 | s2 = s2 + s1; |
| 2575 | s1 = s1 + *p++; |
| 2576 | s2 = s2 + s1; |
| 2577 | s1 = s1 + *p++; |
| 2578 | s2 = s2 + s1; |
| 2579 | s1 = s1 + *p++; |
| 2580 | s2 = s2 + s1; |
| 2581 | s1 = s1 + *p++; |
| 2582 | s2 = s2 + s1; |
| 2583 | s1 = s1 + *p++; |
| 2584 | s2 = s2 + s1; |
| 2585 | } |
| 2586 | hsz -= 5552; |
| 2587 | s1 %= 65521; |
| 2588 | s2 %= 65521; |
| 2589 | } |
| 2590 | |
| 2591 | while (hsz >= 16) |
| 2592 | { |
| 2593 | /* Manually unroll loop 16 times. */ |
| 2594 | s1 = s1 + *p++; |
| 2595 | s2 = s2 + s1; |
| 2596 | s1 = s1 + *p++; |
| 2597 | s2 = s2 + s1; |
| 2598 | s1 = s1 + *p++; |
| 2599 | s2 = s2 + s1; |
| 2600 | s1 = s1 + *p++; |
| 2601 | s2 = s2 + s1; |
| 2602 | s1 = s1 + *p++; |
| 2603 | s2 = s2 + s1; |
| 2604 | s1 = s1 + *p++; |
| 2605 | s2 = s2 + s1; |
| 2606 | s1 = s1 + *p++; |
| 2607 | s2 = s2 + s1; |
| 2608 | s1 = s1 + *p++; |
| 2609 | s2 = s2 + s1; |
| 2610 | s1 = s1 + *p++; |
| 2611 | s2 = s2 + s1; |
| 2612 | s1 = s1 + *p++; |
| 2613 | s2 = s2 + s1; |
| 2614 | s1 = s1 + *p++; |
| 2615 | s2 = s2 + s1; |
| 2616 | s1 = s1 + *p++; |
| 2617 | s2 = s2 + s1; |
| 2618 | s1 = s1 + *p++; |
| 2619 | s2 = s2 + s1; |
| 2620 | s1 = s1 + *p++; |
| 2621 | s2 = s2 + s1; |
| 2622 | s1 = s1 + *p++; |
| 2623 | s2 = s2 + s1; |
| 2624 | s1 = s1 + *p++; |
| 2625 | s2 = s2 + s1; |
| 2626 | |
| 2627 | hsz -= 16; |
| 2628 | } |
| 2629 | |
| 2630 | for (i = 0; i < hsz; ++i) |
| 2631 | { |
| 2632 | s1 = s1 + *p++; |
| 2633 | s2 = s2 + s1; |
| 2634 | } |
| 2635 | |
| 2636 | s1 %= 65521; |
| 2637 | s2 %= 65521; |
| 2638 | |
| 2639 | if (unlikely ((s2 << 16) + s1 != cksum)) |
| 2640 | { |
| 2641 | elf_uncompress_failed (); |
| 2642 | return 0; |
| 2643 | } |
| 2644 | |
| 2645 | return 1; |
| 2646 | } |
| 2647 | |
| 2648 | /* Inflate a zlib stream from PIN/SIN to POUT/SOUT, and verify the |
| 2649 | checksum. Return 1 on success, 0 on error. */ |
| 2650 | |
| 2651 | static int |
| 2652 | elf_zlib_inflate_and_verify (const unsigned char *pin, size_t sin, |
| 2653 | uint16_t *zdebug_table, unsigned char *pout, |
| 2654 | size_t sout) |
| 2655 | { |
| 2656 | if (!elf_zlib_inflate (pin, sin, zdebug_table, pout, sout)) |
| 2657 | return 0; |
if (!elf_zlib_verify_checksum (pin + sin - 4, pout, sout))
| 2659 | return 0; |
| 2660 | return 1; |
| 2661 | } |
| 2662 | |
/* For working memory during zstd decompression, we need
| 2664 | - a literal length FSE table: 512 64-bit values == 4096 bytes |
| 2665 | - a match length FSE table: 512 64-bit values == 4096 bytes |
- an offset FSE table: 256 64-bit values == 2048 bytes
| 2667 | - a Huffman tree: 2048 uint16_t values == 4096 bytes |
| 2668 | - scratch space, one of |
| 2669 | - to build an FSE table: 512 uint16_t values == 1024 bytes |
| 2670 | - to build a Huffman tree: 512 uint16_t + 256 uint32_t == 2048 bytes |
| 2671 | */ |
| 2672 | |
| 2673 | #define ZSTD_TABLE_SIZE \ |
| 2674 | (2 * 512 * sizeof (struct elf_zstd_fse_baseline_entry) \ |
| 2675 | + 256 * sizeof (struct elf_zstd_fse_baseline_entry) \ |
| 2676 | + 2048 * sizeof (uint16_t) \ |
| 2677 | + 512 * sizeof (uint16_t) + 256 * sizeof (uint32_t)) |
| 2678 | |
| 2679 | #define ZSTD_TABLE_LITERAL_FSE_OFFSET (0) |
| 2680 | |
| 2681 | #define ZSTD_TABLE_MATCH_FSE_OFFSET \ |
| 2682 | (512 * sizeof (struct elf_zstd_fse_baseline_entry)) |
| 2683 | |
| 2684 | #define ZSTD_TABLE_OFFSET_FSE_OFFSET \ |
| 2685 | (ZSTD_TABLE_MATCH_FSE_OFFSET \ |
| 2686 | + 512 * sizeof (struct elf_zstd_fse_baseline_entry)) |
| 2687 | |
| 2688 | #define ZSTD_TABLE_HUFFMAN_OFFSET \ |
| 2689 | (ZSTD_TABLE_OFFSET_FSE_OFFSET \ |
| 2690 | + 256 * sizeof (struct elf_zstd_fse_baseline_entry)) |
| 2691 | |
| 2692 | #define ZSTD_TABLE_WORK_OFFSET \ |
| 2693 | (ZSTD_TABLE_HUFFMAN_OFFSET + 2048 * sizeof (uint16_t)) |
| 2694 | |
| 2695 | /* An entry in a zstd FSE table. */ |
| 2696 | |
| 2697 | struct elf_zstd_fse_entry |
| 2698 | { |
| 2699 | /* The value that this FSE entry represents. */ |
| 2700 | unsigned char symbol; |
| 2701 | /* The number of bits to read to determine the next state. */ |
| 2702 | unsigned char bits; |
| 2703 | /* Add the bits to this base to get the next state. */ |
| 2704 | uint16_t base; |
| 2705 | }; |
| 2706 | |
| 2707 | static int |
| 2708 | elf_zstd_build_fse (const int16_t *, int, uint16_t *, int, |
| 2709 | struct elf_zstd_fse_entry *); |
| 2710 | |
| 2711 | /* Read a zstd FSE table and build the decoding table in *TABLE, updating *PPIN |
| 2712 | as it reads. ZDEBUG_TABLE is scratch space; it must be enough for 512 |
| 2713 | uint16_t values (1024 bytes). MAXIDX is the maximum number of symbols |
| 2714 | permitted. *TABLE_BITS is the maximum number of bits for symbols in the |
| 2715 | table: the size of *TABLE is at least 1 << *TABLE_BITS. This updates |
| 2716 | *TABLE_BITS to the actual number of bits. Returns 1 on success, 0 on |
| 2717 | error. */ |
| 2718 | |
| 2719 | static int |
| 2720 | elf_zstd_read_fse (const unsigned char **ppin, const unsigned char *pinend, |
| 2721 | uint16_t *zdebug_table, int maxidx, |
| 2722 | struct elf_zstd_fse_entry *table, int *table_bits) |
| 2723 | { |
| 2724 | const unsigned char *pin; |
| 2725 | int16_t *norm; |
| 2726 | uint16_t *next; |
| 2727 | uint64_t val; |
| 2728 | unsigned int bits; |
| 2729 | int accuracy_log; |
| 2730 | uint32_t remaining; |
| 2731 | uint32_t threshold; |
| 2732 | int bits_needed; |
| 2733 | int idx; |
| 2734 | int prev0; |
| 2735 | |
| 2736 | pin = *ppin; |
| 2737 | |
| 2738 | norm = (int16_t *) zdebug_table; |
| 2739 | next = zdebug_table + 256; |
| 2740 | |
| 2741 | if (unlikely (pin + 3 >= pinend)) |
| 2742 | { |
| 2743 | elf_uncompress_failed (); |
| 2744 | return 0; |
| 2745 | } |
| 2746 | |
| 2747 | /* Align PIN to a 32-bit boundary. */ |
| 2748 | |
| 2749 | val = 0; |
| 2750 | bits = 0; |
| 2751 | while ((((uintptr_t) pin) & 3) != 0) |
| 2752 | { |
| 2753 | val |= (uint64_t)*pin << bits; |
| 2754 | bits += 8; |
| 2755 | ++pin; |
| 2756 | } |
| 2757 | |
if (!elf_fetch_bits (&pin, pinend, &val, &bits))
| 2759 | return 0; |
| 2760 | |
| 2761 | accuracy_log = (val & 0xf) + 5; |
| 2762 | if (accuracy_log > *table_bits) |
| 2763 | { |
| 2764 | elf_uncompress_failed (); |
| 2765 | return 0; |
| 2766 | } |
| 2767 | *table_bits = accuracy_log; |
| 2768 | val >>= 4; |
| 2769 | bits -= 4; |
| 2770 | |
| 2771 | /* This code is mostly copied from the reference implementation. */ |
| 2772 | |
| 2773 | /* The number of remaining probabilities, plus 1. This sets the number of |
| 2774 | bits that need to be read for the next value. */ |
| 2775 | remaining = (1 << accuracy_log) + 1; |
| 2776 | |
| 2777 | /* The current difference between small and large values, which depends on |
| 2778 | the number of remaining values. Small values use one less bit. */ |
| 2779 | threshold = 1 << accuracy_log; |
| 2780 | |
| 2781 | /* The number of bits used to compute threshold. */ |
| 2782 | bits_needed = accuracy_log + 1; |
| 2783 | |
| 2784 | /* The next character value. */ |
| 2785 | idx = 0; |
| 2786 | |
| 2787 | /* Whether the last count was 0. */ |
| 2788 | prev0 = 0; |
| 2789 | |
| 2790 | while (remaining > 1 && idx <= maxidx) |
| 2791 | { |
| 2792 | uint32_t max; |
| 2793 | int32_t count; |
| 2794 | |
if (!elf_fetch_bits (&pin, pinend, &val, &bits))
| 2796 | return 0; |
| 2797 | |
| 2798 | if (prev0) |
| 2799 | { |
| 2800 | int zidx; |
| 2801 | |
| 2802 | /* Previous count was 0, so there is a 2-bit repeat flag. If the |
| 2803 | 2-bit flag is 0b11, it adds 3 and then there is another repeat |
| 2804 | flag. */ |
| 2805 | zidx = idx; |
| 2806 | while ((val & 0xfff) == 0xfff) |
| 2807 | { |
| 2808 | zidx += 3 * 6; |
| 2809 | val >>= 12; |
| 2810 | bits -= 12; |
if (!elf_fetch_bits (&pin, pinend, &val, &bits))
| 2812 | return 0; |
| 2813 | } |
| 2814 | while ((val & 3) == 3) |
| 2815 | { |
| 2816 | zidx += 3; |
| 2817 | val >>= 2; |
| 2818 | bits -= 2; |
if (!elf_fetch_bits (&pin, pinend, &val, &bits))
| 2820 | return 0; |
| 2821 | } |
| 2822 | /* We have at least 13 bits here, don't need to fetch. */ |
| 2823 | zidx += val & 3; |
| 2824 | val >>= 2; |
| 2825 | bits -= 2; |
| 2826 | |
| 2827 | if (unlikely (zidx > maxidx)) |
| 2828 | { |
| 2829 | elf_uncompress_failed (); |
| 2830 | return 0; |
| 2831 | } |
| 2832 | |
| 2833 | for (; idx < zidx; idx++) |
| 2834 | norm[idx] = 0; |
| 2835 | |
| 2836 | prev0 = 0; |
| 2837 | continue; |
| 2838 | } |
| 2839 | |
| 2840 | max = (2 * threshold - 1) - remaining; |
| 2841 | if ((val & (threshold - 1)) < max) |
| 2842 | { |
| 2843 | /* A small value. */ |
| 2844 | count = (int32_t) ((uint32_t) val & (threshold - 1)); |
| 2845 | val >>= bits_needed - 1; |
| 2846 | bits -= bits_needed - 1; |
| 2847 | } |
| 2848 | else |
| 2849 | { |
| 2850 | /* A large value. */ |
| 2851 | count = (int32_t) ((uint32_t) val & (2 * threshold - 1)); |
| 2852 | if (count >= (int32_t) threshold) |
| 2853 | count -= (int32_t) max; |
| 2854 | val >>= bits_needed; |
| 2855 | bits -= bits_needed; |
| 2856 | } |
| 2857 | |
| 2858 | count--; |
| 2859 | if (count >= 0) |
| 2860 | remaining -= count; |
| 2861 | else |
| 2862 | remaining--; |
| 2863 | if (unlikely (idx >= 256)) |
| 2864 | { |
| 2865 | elf_uncompress_failed (); |
| 2866 | return 0; |
| 2867 | } |
| 2868 | norm[idx] = (int16_t) count; |
| 2869 | ++idx; |
| 2870 | |
| 2871 | prev0 = count == 0; |
| 2872 | |
| 2873 | while (remaining < threshold) |
| 2874 | { |
| 2875 | bits_needed--; |
| 2876 | threshold >>= 1; |
| 2877 | } |
| 2878 | } |
| 2879 | |
| 2880 | if (unlikely (remaining != 1)) |
| 2881 | { |
| 2882 | elf_uncompress_failed (); |
| 2883 | return 0; |
| 2884 | } |
| 2885 | |
| 2886 | /* If we've read ahead more than a byte, back up. */ |
| 2887 | while (bits >= 8) |
| 2888 | { |
| 2889 | --pin; |
| 2890 | bits -= 8; |
| 2891 | } |
| 2892 | |
| 2893 | *ppin = pin; |
| 2894 | |
| 2895 | for (; idx <= maxidx; idx++) |
| 2896 | norm[idx] = 0; |
| 2897 | |
| 2898 | return elf_zstd_build_fse (norm, idx, next, *table_bits, table); |
| 2899 | } |
| 2900 | |
| 2901 | /* Build the FSE decoding table from a list of probabilities. This reads from |
NORM of length IDX, uses NEXT as scratch space, and writes to *TABLE, which
holds 1 << TABLE_BITS entries. */
| 2904 | |
| 2905 | static int |
| 2906 | elf_zstd_build_fse (const int16_t *norm, int idx, uint16_t *next, |
| 2907 | int table_bits, struct elf_zstd_fse_entry *table) |
| 2908 | { |
| 2909 | int table_size; |
| 2910 | int high_threshold; |
| 2911 | int i; |
| 2912 | int pos; |
| 2913 | int step; |
| 2914 | int mask; |
| 2915 | |
| 2916 | table_size = 1 << table_bits; |
| 2917 | high_threshold = table_size - 1; |
| 2918 | for (i = 0; i < idx; i++) |
| 2919 | { |
| 2920 | int16_t n; |
| 2921 | |
| 2922 | n = norm[i]; |
| 2923 | if (n >= 0) |
| 2924 | next[i] = (uint16_t) n; |
| 2925 | else |
| 2926 | { |
| 2927 | table[high_threshold].symbol = (unsigned char) i; |
| 2928 | high_threshold--; |
| 2929 | next[i] = 1; |
| 2930 | } |
| 2931 | } |
| 2932 | |
| 2933 | pos = 0; |
| 2934 | step = (table_size >> 1) + (table_size >> 3) + 3; |
| 2935 | mask = table_size - 1; |
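/* This is the symbol spreading step used by the zstd reference
implementation. For the table sizes seen here it is odd, and
therefore coprime with the power-of-two table size, so the walk
below touches each remaining position exactly once, skipping the
slots already reserved above HIGH_THRESHOLD. */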
| 2936 | for (i = 0; i < idx; i++) |
| 2937 | { |
| 2938 | int n; |
| 2939 | int j; |
| 2940 | |
| 2941 | n = (int) norm[i]; |
| 2942 | for (j = 0; j < n; j++) |
| 2943 | { |
| 2944 | table[pos].symbol = (unsigned char) i; |
| 2945 | pos = (pos + step) & mask; |
| 2946 | while (unlikely (pos > high_threshold)) |
| 2947 | pos = (pos + step) & mask; |
| 2948 | } |
| 2949 | } |
| 2950 | if (unlikely (pos != 0)) |
| 2951 | { |
| 2952 | elf_uncompress_failed (); |
| 2953 | return 0; |
| 2954 | } |
| 2955 | |
| 2956 | for (i = 0; i < table_size; i++) |
| 2957 | { |
| 2958 | unsigned char sym; |
| 2959 | uint16_t next_state; |
| 2960 | int high_bit; |
| 2961 | int bits; |
| 2962 | |
| 2963 | sym = table[i].symbol; |
| 2964 | next_state = next[sym]; |
| 2965 | ++next[sym]; |
| 2966 | |
| 2967 | if (next_state == 0) |
| 2968 | { |
| 2969 | elf_uncompress_failed (); |
| 2970 | return 0; |
| 2971 | } |
| 2972 | high_bit = 31 - __builtin_clz (next_state); |
| 2973 | |
| 2974 | bits = table_bits - high_bit; |
| 2975 | table[i].bits = (unsigned char) bits; |
| 2976 | table[i].base = (uint16_t) ((next_state << bits) - table_size); |
| 2977 | } |
| 2978 | |
| 2979 | return 1; |
| 2980 | } |
| 2981 | |
| 2982 | /* Encode the baseline and bits into a single 32-bit value. */ |
| 2983 | |
| 2984 | #define ZSTD_ENCODE_BASELINE_BITS(baseline, basebits) \ |
| 2985 | ((uint32_t)(baseline) | ((uint32_t)(basebits) << 24)) |
| 2986 | |
| 2987 | #define ZSTD_DECODE_BASELINE(baseline_basebits) \ |
| 2988 | ((uint32_t)(baseline_basebits) & 0xffffff) |
| 2989 | |
| 2990 | #define ZSTD_DECODE_BASEBITS(baseline_basebits) \ |
| 2991 | ((uint32_t)(baseline_basebits) >> 24) |
| 2992 | |
| 2993 | /* Given a literal length code, we need to read a number of bits and add that |
| 2994 | to a baseline. For states 0 to 15 the baseline is the state and the number |
| 2995 | of bits is zero. */ |
| 2996 | |
| 2997 | #define ZSTD_LITERAL_LENGTH_BASELINE_OFFSET (16) |
| 2998 | |
| 2999 | static const uint32_t elf_zstd_literal_length_base[] = |
| 3000 | { |
| 3001 | ZSTD_ENCODE_BASELINE_BITS(16, 1), |
| 3002 | ZSTD_ENCODE_BASELINE_BITS(18, 1), |
| 3003 | ZSTD_ENCODE_BASELINE_BITS(20, 1), |
| 3004 | ZSTD_ENCODE_BASELINE_BITS(22, 1), |
| 3005 | ZSTD_ENCODE_BASELINE_BITS(24, 2), |
| 3006 | ZSTD_ENCODE_BASELINE_BITS(28, 2), |
| 3007 | ZSTD_ENCODE_BASELINE_BITS(32, 3), |
| 3008 | ZSTD_ENCODE_BASELINE_BITS(40, 3), |
| 3009 | ZSTD_ENCODE_BASELINE_BITS(48, 4), |
| 3010 | ZSTD_ENCODE_BASELINE_BITS(64, 6), |
| 3011 | ZSTD_ENCODE_BASELINE_BITS(128, 7), |
| 3012 | ZSTD_ENCODE_BASELINE_BITS(256, 8), |
| 3013 | ZSTD_ENCODE_BASELINE_BITS(512, 9), |
| 3014 | ZSTD_ENCODE_BASELINE_BITS(1024, 10), |
| 3015 | ZSTD_ENCODE_BASELINE_BITS(2048, 11), |
| 3016 | ZSTD_ENCODE_BASELINE_BITS(4096, 12), |
| 3017 | ZSTD_ENCODE_BASELINE_BITS(8192, 13), |
| 3018 | ZSTD_ENCODE_BASELINE_BITS(16384, 14), |
| 3019 | ZSTD_ENCODE_BASELINE_BITS(32768, 15), |
| 3020 | ZSTD_ENCODE_BASELINE_BITS(65536, 16) |
| 3021 | }; |
| 3022 | |
| 3023 | /* The same applies to match length codes. For states 0 to 31 the baseline is |
| 3024 | the state + 3 and the number of bits is zero. */ |
| 3025 | |
| 3026 | #define ZSTD_MATCH_LENGTH_BASELINE_OFFSET (32) |
| 3027 | |
| 3028 | static const uint32_t elf_zstd_match_length_base[] = |
| 3029 | { |
| 3030 | ZSTD_ENCODE_BASELINE_BITS(35, 1), |
| 3031 | ZSTD_ENCODE_BASELINE_BITS(37, 1), |
| 3032 | ZSTD_ENCODE_BASELINE_BITS(39, 1), |
| 3033 | ZSTD_ENCODE_BASELINE_BITS(41, 1), |
| 3034 | ZSTD_ENCODE_BASELINE_BITS(43, 2), |
| 3035 | ZSTD_ENCODE_BASELINE_BITS(47, 2), |
| 3036 | ZSTD_ENCODE_BASELINE_BITS(51, 3), |
| 3037 | ZSTD_ENCODE_BASELINE_BITS(59, 3), |
| 3038 | ZSTD_ENCODE_BASELINE_BITS(67, 4), |
| 3039 | ZSTD_ENCODE_BASELINE_BITS(83, 4), |
| 3040 | ZSTD_ENCODE_BASELINE_BITS(99, 5), |
| 3041 | ZSTD_ENCODE_BASELINE_BITS(131, 7), |
| 3042 | ZSTD_ENCODE_BASELINE_BITS(259, 8), |
| 3043 | ZSTD_ENCODE_BASELINE_BITS(515, 9), |
| 3044 | ZSTD_ENCODE_BASELINE_BITS(1027, 10), |
| 3045 | ZSTD_ENCODE_BASELINE_BITS(2051, 11), |
| 3046 | ZSTD_ENCODE_BASELINE_BITS(4099, 12), |
| 3047 | ZSTD_ENCODE_BASELINE_BITS(8195, 13), |
| 3048 | ZSTD_ENCODE_BASELINE_BITS(16387, 14), |
| 3049 | ZSTD_ENCODE_BASELINE_BITS(32771, 15), |
| 3050 | ZSTD_ENCODE_BASELINE_BITS(65539, 16) |
| 3051 | }; |
| 3052 | |
/* An entry in an FSE table used for literal length, match length, and offset values. For these we
| 3054 | have to map the symbol to a baseline value, and we have to read zero or more |
| 3055 | bits and add that value to the baseline value. Rather than look the values |
| 3056 | up in a separate table, we grow the FSE table so that we get better memory |
| 3057 | caching. */ |
| 3058 | |
| 3059 | struct elf_zstd_fse_baseline_entry |
| 3060 | { |
/* The baseline for the value that this FSE entry represents. */
| 3062 | uint32_t baseline; |
| 3063 | /* The number of bits to read to add to the baseline. */ |
| 3064 | unsigned char basebits; |
| 3065 | /* The number of bits to read to determine the next state. */ |
| 3066 | unsigned char bits; |
| 3067 | /* Add the bits to this base to get the next state. */ |
| 3068 | uint16_t base; |
| 3069 | }; |
| 3070 | |
| 3071 | /* Convert the literal length FSE table FSE_TABLE to an FSE baseline table at |
| 3072 | BASELINE_TABLE. Note that FSE_TABLE and BASELINE_TABLE will overlap. */ |
| 3073 | |
| 3074 | static int |
| 3075 | elf_zstd_make_literal_baseline_fse ( |
| 3076 | const struct elf_zstd_fse_entry *fse_table, |
| 3077 | int table_bits, |
| 3078 | struct elf_zstd_fse_baseline_entry *baseline_table) |
| 3079 | { |
| 3080 | size_t count; |
| 3081 | const struct elf_zstd_fse_entry *pfse; |
| 3082 | struct elf_zstd_fse_baseline_entry *pbaseline; |
| 3083 | |
| 3084 | /* Convert backward to avoid overlap. */ |
| 3085 | |
| 3086 | count = 1U << table_bits; |
| 3087 | pfse = fse_table + count; |
| 3088 | pbaseline = baseline_table + count; |
| 3089 | while (pfse > fse_table) |
| 3090 | { |
| 3091 | unsigned char symbol; |
| 3092 | unsigned char bits; |
| 3093 | uint16_t base; |
| 3094 | |
| 3095 | --pfse; |
| 3096 | --pbaseline; |
| 3097 | symbol = pfse->symbol; |
| 3098 | bits = pfse->bits; |
| 3099 | base = pfse->base; |
| 3100 | if (symbol < ZSTD_LITERAL_LENGTH_BASELINE_OFFSET) |
| 3101 | { |
| 3102 | pbaseline->baseline = (uint32_t)symbol; |
| 3103 | pbaseline->basebits = 0; |
| 3104 | } |
| 3105 | else |
| 3106 | { |
| 3107 | unsigned int idx; |
| 3108 | uint32_t basebits; |
| 3109 | |
| 3110 | if (unlikely (symbol > 35)) |
| 3111 | { |
| 3112 | elf_uncompress_failed (); |
| 3113 | return 0; |
| 3114 | } |
| 3115 | idx = symbol - ZSTD_LITERAL_LENGTH_BASELINE_OFFSET; |
| 3116 | basebits = elf_zstd_literal_length_base[idx]; |
| 3117 | pbaseline->baseline = ZSTD_DECODE_BASELINE(basebits); |
| 3118 | pbaseline->basebits = ZSTD_DECODE_BASEBITS(basebits); |
| 3119 | } |
| 3120 | pbaseline->bits = bits; |
| 3121 | pbaseline->base = base; |
| 3122 | } |
| 3123 | |
| 3124 | return 1; |
| 3125 | } |
| 3126 | |
/* Convert the offset FSE table FSE_TABLE to an FSE baseline table at
| 3128 | BASELINE_TABLE. Note that FSE_TABLE and BASELINE_TABLE will overlap. */ |
| 3129 | |
| 3130 | static int |
| 3131 | elf_zstd_make_offset_baseline_fse ( |
| 3132 | const struct elf_zstd_fse_entry *fse_table, |
| 3133 | int table_bits, |
| 3134 | struct elf_zstd_fse_baseline_entry *baseline_table) |
| 3135 | { |
| 3136 | size_t count; |
| 3137 | const struct elf_zstd_fse_entry *pfse; |
| 3138 | struct elf_zstd_fse_baseline_entry *pbaseline; |
| 3139 | |
| 3140 | /* Convert backward to avoid overlap. */ |
| 3141 | |
| 3142 | count = 1U << table_bits; |
| 3143 | pfse = fse_table + count; |
| 3144 | pbaseline = baseline_table + count; |
| 3145 | while (pfse > fse_table) |
| 3146 | { |
| 3147 | unsigned char symbol; |
| 3148 | unsigned char bits; |
| 3149 | uint16_t base; |
| 3150 | |
| 3151 | --pfse; |
| 3152 | --pbaseline; |
| 3153 | symbol = pfse->symbol; |
| 3154 | bits = pfse->bits; |
| 3155 | base = pfse->base; |
| 3156 | if (unlikely (symbol > 31)) |
| 3157 | { |
| 3158 | elf_uncompress_failed (); |
| 3159 | return 0; |
| 3160 | } |
| 3161 | |
| 3162 | /* The simple way to write this is |
| 3163 | |
| 3164 | pbaseline->baseline = (uint32_t)1 << symbol; |
| 3165 | pbaseline->basebits = symbol; |
| 3166 | |
| 3167 | That will give us an offset value that corresponds to the one |
| 3168 | described in the RFC. However, for offset values > 3, we have to |
| 3169 | subtract 3. And for offset values 1, 2, 3 we use a repeated offset. |
| 3170 | The baseline is always a power of 2, and is never 0, so for these low |
| 3171 | values we will see one entry that is baseline 1, basebits 0, and one |
| 3172 | entry that is baseline 2, basebits 1. All other entries will have |
| 3173 | baseline >= 4 and basebits >= 2. |
| 3174 | |
| 3175 | So we can check for RFC offset <= 3 by checking for basebits <= 1. |
| 3176 | And that means that we can subtract 3 here and not worry about doing |
| 3177 | it in the hot loop. */ |
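/* For example, symbol 5 gives baseline 29 (32 - 3) with 5 extra bits,
covering offset values 29 through 60. */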
| 3178 | |
| 3179 | pbaseline->baseline = (uint32_t)1 << symbol; |
| 3180 | if (symbol >= 2) |
| 3181 | pbaseline->baseline -= 3; |
| 3182 | pbaseline->basebits = symbol; |
| 3183 | pbaseline->bits = bits; |
| 3184 | pbaseline->base = base; |
| 3185 | } |
| 3186 | |
| 3187 | return 1; |
| 3188 | } |
| 3189 | |
| 3190 | /* Convert the match length FSE table FSE_TABLE to an FSE baseline table at |
| 3191 | BASELINE_TABLE. Note that FSE_TABLE and BASELINE_TABLE will overlap. */ |
| 3192 | |
| 3193 | static int |
| 3194 | elf_zstd_make_match_baseline_fse ( |
| 3195 | const struct elf_zstd_fse_entry *fse_table, |
| 3196 | int table_bits, |
| 3197 | struct elf_zstd_fse_baseline_entry *baseline_table) |
| 3198 | { |
| 3199 | size_t count; |
| 3200 | const struct elf_zstd_fse_entry *pfse; |
| 3201 | struct elf_zstd_fse_baseline_entry *pbaseline; |
| 3202 | |
| 3203 | /* Convert backward to avoid overlap. */ |
| 3204 | |
| 3205 | count = 1U << table_bits; |
| 3206 | pfse = fse_table + count; |
| 3207 | pbaseline = baseline_table + count; |
| 3208 | while (pfse > fse_table) |
| 3209 | { |
| 3210 | unsigned char symbol; |
| 3211 | unsigned char bits; |
| 3212 | uint16_t base; |
| 3213 | |
| 3214 | --pfse; |
| 3215 | --pbaseline; |
| 3216 | symbol = pfse->symbol; |
| 3217 | bits = pfse->bits; |
| 3218 | base = pfse->base; |
| 3219 | if (symbol < ZSTD_MATCH_LENGTH_BASELINE_OFFSET) |
| 3220 | { |
| 3221 | pbaseline->baseline = (uint32_t)symbol + 3; |
| 3222 | pbaseline->basebits = 0; |
| 3223 | } |
| 3224 | else |
| 3225 | { |
| 3226 | unsigned int idx; |
| 3227 | uint32_t basebits; |
| 3228 | |
| 3229 | if (unlikely (symbol > 52)) |
| 3230 | { |
| 3231 | elf_uncompress_failed (); |
| 3232 | return 0; |
| 3233 | } |
| 3234 | idx = symbol - ZSTD_MATCH_LENGTH_BASELINE_OFFSET; |
| 3235 | basebits = elf_zstd_match_length_base[idx]; |
| 3236 | pbaseline->baseline = ZSTD_DECODE_BASELINE(basebits); |
| 3237 | pbaseline->basebits = ZSTD_DECODE_BASEBITS(basebits); |
| 3238 | } |
| 3239 | pbaseline->bits = bits; |
| 3240 | pbaseline->base = base; |
| 3241 | } |
| 3242 | |
| 3243 | return 1; |
| 3244 | } |
| 3245 | |
| 3246 | #ifdef BACKTRACE_GENERATE_ZSTD_FSE_TABLES |
| 3247 | |
| 3248 | /* Used to generate the predefined FSE decoding tables for zstd. */ |
| 3249 | |
| 3250 | #include <stdio.h> |
| 3251 | |
| 3252 | /* These values are straight from RFC 8878. */ |
| 3253 | |
| 3254 | static int16_t lit[36] = |
| 3255 | { |
| 3256 | 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, |
| 3257 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, |
| 3258 | -1,-1,-1,-1 |
| 3259 | }; |
| 3260 | |
| 3261 | static int16_t match[53] = |
| 3262 | { |
| 3263 | 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, |
| 3264 | 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, |
| 3265 | 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1, |
| 3266 | -1,-1,-1,-1,-1 |
| 3267 | }; |
| 3268 | |
| 3269 | static int16_t offset[29] = |
| 3270 | { |
| 3271 | 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, |
| 3272 | 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 |
| 3273 | }; |
| 3274 | |
| 3275 | static uint16_t next[256]; |
| 3276 | |
| 3277 | static void |
| 3278 | print_table (const struct elf_zstd_fse_baseline_entry *table, size_t size) |
| 3279 | { |
| 3280 | size_t i; |
| 3281 | |
printf ("{\n");
for (i = 0; i < size; i += 3)
{
int j;

printf (" ");
for (j = 0; j < 3 && i + j < size; ++j)
printf (" { %u, %d, %d, %d },", table[i + j].baseline,
table[i + j].basebits, table[i + j].bits,
table[i + j].base);
printf ("\n");
}
printf ("};\n");
| 3295 | } |
| 3296 | |
| 3297 | int |
| 3298 | main () |
| 3299 | { |
| 3300 | struct elf_zstd_fse_entry lit_table[64]; |
| 3301 | struct elf_zstd_fse_baseline_entry lit_baseline[64]; |
| 3302 | struct elf_zstd_fse_entry match_table[64]; |
| 3303 | struct elf_zstd_fse_baseline_entry match_baseline[64]; |
| 3304 | struct elf_zstd_fse_entry offset_table[32]; |
| 3305 | struct elf_zstd_fse_baseline_entry offset_baseline[32]; |
| 3306 | |
| 3307 | if (!elf_zstd_build_fse (lit, sizeof lit / sizeof lit[0], next, |
| 3308 | 6, lit_table)) |
| 3309 | { |
fprintf (stderr, "elf_zstd_build_fse failed\n");
| 3311 | exit (EXIT_FAILURE); |
| 3312 | } |
| 3313 | |
| 3314 | if (!elf_zstd_make_literal_baseline_fse (lit_table, 6, lit_baseline)) |
| 3315 | { |
fprintf (stderr, "elf_zstd_make_literal_baseline_fse failed\n");
| 3317 | exit (EXIT_FAILURE); |
| 3318 | } |
| 3319 | |
printf ("static const struct elf_zstd_fse_baseline_entry "
"elf_zstd_lit_table[64] =\n");
| 3322 | print_table (lit_baseline, |
| 3323 | sizeof lit_baseline / sizeof lit_baseline[0]); |
printf ("\n");
| 3325 | |
| 3326 | if (!elf_zstd_build_fse (match, sizeof match / sizeof match[0], next, |
| 3327 | 6, match_table)) |
| 3328 | { |
fprintf (stderr, "elf_zstd_build_fse failed\n");
| 3330 | exit (EXIT_FAILURE); |
| 3331 | } |
| 3332 | |
| 3333 | if (!elf_zstd_make_match_baseline_fse (match_table, 6, match_baseline)) |
| 3334 | { |
fprintf (stderr, "elf_zstd_make_match_baseline_fse failed\n");
| 3336 | exit (EXIT_FAILURE); |
| 3337 | } |
| 3338 | |
printf ("static const struct elf_zstd_fse_baseline_entry "
"elf_zstd_match_table[64] =\n");
| 3341 | print_table (match_baseline, |
| 3342 | sizeof match_baseline / sizeof match_baseline[0]); |
printf ("\n");
| 3344 | |
| 3345 | if (!elf_zstd_build_fse (offset, sizeof offset / sizeof offset[0], next, |
| 3346 | 5, offset_table)) |
| 3347 | { |
fprintf (stderr, "elf_zstd_build_fse failed\n");
| 3349 | exit (EXIT_FAILURE); |
| 3350 | } |
| 3351 | |
| 3352 | if (!elf_zstd_make_offset_baseline_fse (offset_table, 5, offset_baseline)) |
| 3353 | { |
fprintf (stderr, "elf_zstd_make_offset_baseline_fse failed\n");
| 3355 | exit (EXIT_FAILURE); |
| 3356 | } |
| 3357 | |
printf ("static const struct elf_zstd_fse_baseline_entry "
"elf_zstd_offset_table[32] =\n");
| 3360 | print_table (offset_baseline, |
| 3361 | sizeof offset_baseline / sizeof offset_baseline[0]); |
| 3362 | printf ("\n" ); |
| 3363 | |
| 3364 | return 0; |
| 3365 | } |
| 3366 | |
| 3367 | #endif |
| 3368 | |
| 3369 | /* The fixed tables generated by the #ifdef'ed out main function |
| 3370 | above. */ |
| 3371 | |
| 3372 | static const struct elf_zstd_fse_baseline_entry elf_zstd_lit_table[64] = |
| 3373 | { |
  { 0, 0, 4, 0 }, { 0, 0, 4, 16 }, { 1, 0, 5, 32 },
  { 3, 0, 5, 0 }, { 4, 0, 5, 0 }, { 6, 0, 5, 0 },
  { 7, 0, 5, 0 }, { 9, 0, 5, 0 }, { 10, 0, 5, 0 },
  { 12, 0, 5, 0 }, { 14, 0, 6, 0 }, { 16, 1, 5, 0 },
  { 20, 1, 5, 0 }, { 22, 1, 5, 0 }, { 28, 2, 5, 0 },
  { 32, 3, 5, 0 }, { 48, 4, 5, 0 }, { 64, 6, 5, 32 },
  { 128, 7, 5, 0 }, { 256, 8, 6, 0 }, { 1024, 10, 6, 0 },
  { 4096, 12, 6, 0 }, { 0, 0, 4, 32 }, { 1, 0, 4, 0 },
  { 2, 0, 5, 0 }, { 4, 0, 5, 32 }, { 5, 0, 5, 0 },
  { 7, 0, 5, 32 }, { 8, 0, 5, 0 }, { 10, 0, 5, 32 },
  { 11, 0, 5, 0 }, { 13, 0, 6, 0 }, { 16, 1, 5, 32 },
  { 18, 1, 5, 0 }, { 22, 1, 5, 32 }, { 24, 2, 5, 0 },
  { 32, 3, 5, 32 }, { 40, 3, 5, 0 }, { 64, 6, 4, 0 },
  { 64, 6, 4, 16 }, { 128, 7, 5, 32 }, { 512, 9, 6, 0 },
  { 2048, 11, 6, 0 }, { 0, 0, 4, 48 }, { 1, 0, 4, 16 },
  { 2, 0, 5, 32 }, { 3, 0, 5, 32 }, { 5, 0, 5, 32 },
  { 6, 0, 5, 32 }, { 8, 0, 5, 32 }, { 9, 0, 5, 32 },
  { 11, 0, 5, 32 }, { 12, 0, 5, 32 }, { 15, 0, 6, 0 },
  { 18, 1, 5, 32 }, { 20, 1, 5, 32 }, { 24, 2, 5, 32 },
  { 28, 2, 5, 32 }, { 40, 3, 5, 32 }, { 48, 4, 5, 32 },
  { 65536, 16, 6, 0 }, { 32768, 15, 6, 0 }, { 16384, 14, 6, 0 },
  { 8192, 13, 6, 0 },
| 3396 | }; |
| 3397 | |
| 3398 | static const struct elf_zstd_fse_baseline_entry elf_zstd_match_table[64] = |
| 3399 | { |
  { 3, 0, 6, 0 }, { 4, 0, 4, 0 }, { 5, 0, 5, 32 },
  { 6, 0, 5, 0 }, { 8, 0, 5, 0 }, { 9, 0, 5, 0 },
  { 11, 0, 5, 0 }, { 13, 0, 6, 0 }, { 16, 0, 6, 0 },
  { 19, 0, 6, 0 }, { 22, 0, 6, 0 }, { 25, 0, 6, 0 },
  { 28, 0, 6, 0 }, { 31, 0, 6, 0 }, { 34, 0, 6, 0 },
  { 37, 1, 6, 0 }, { 41, 1, 6, 0 }, { 47, 2, 6, 0 },
  { 59, 3, 6, 0 }, { 83, 4, 6, 0 }, { 131, 7, 6, 0 },
  { 515, 9, 6, 0 }, { 4, 0, 4, 16 }, { 5, 0, 4, 0 },
  { 6, 0, 5, 32 }, { 7, 0, 5, 0 }, { 9, 0, 5, 32 },
  { 10, 0, 5, 0 }, { 12, 0, 6, 0 }, { 15, 0, 6, 0 },
  { 18, 0, 6, 0 }, { 21, 0, 6, 0 }, { 24, 0, 6, 0 },
  { 27, 0, 6, 0 }, { 30, 0, 6, 0 }, { 33, 0, 6, 0 },
  { 35, 1, 6, 0 }, { 39, 1, 6, 0 }, { 43, 2, 6, 0 },
  { 51, 3, 6, 0 }, { 67, 4, 6, 0 }, { 99, 5, 6, 0 },
  { 259, 8, 6, 0 }, { 4, 0, 4, 32 }, { 4, 0, 4, 48 },
  { 5, 0, 4, 16 }, { 7, 0, 5, 32 }, { 8, 0, 5, 32 },
  { 10, 0, 5, 32 }, { 11, 0, 5, 32 }, { 14, 0, 6, 0 },
  { 17, 0, 6, 0 }, { 20, 0, 6, 0 }, { 23, 0, 6, 0 },
  { 26, 0, 6, 0 }, { 29, 0, 6, 0 }, { 32, 0, 6, 0 },
  { 65539, 16, 6, 0 }, { 32771, 15, 6, 0 }, { 16387, 14, 6, 0 },
  { 8195, 13, 6, 0 }, { 4099, 12, 6, 0 }, { 2051, 11, 6, 0 },
  { 1027, 10, 6, 0 },
| 3422 | }; |
| 3423 | |
| 3424 | static const struct elf_zstd_fse_baseline_entry elf_zstd_offset_table[32] = |
| 3425 | { |
  { 1, 0, 5, 0 }, { 61, 6, 4, 0 }, { 509, 9, 5, 0 },
  { 32765, 15, 5, 0 }, { 2097149, 21, 5, 0 }, { 5, 3, 5, 0 },
  { 125, 7, 4, 0 }, { 4093, 12, 5, 0 }, { 262141, 18, 5, 0 },
  { 8388605, 23, 5, 0 }, { 29, 5, 5, 0 }, { 253, 8, 4, 0 },
  { 16381, 14, 5, 0 }, { 1048573, 20, 5, 0 }, { 1, 2, 5, 0 },
  { 125, 7, 4, 16 }, { 2045, 11, 5, 0 }, { 131069, 17, 5, 0 },
  { 4194301, 22, 5, 0 }, { 13, 4, 5, 0 }, { 253, 8, 4, 16 },
  { 8189, 13, 5, 0 }, { 524285, 19, 5, 0 }, { 2, 1, 5, 0 },
  { 61, 6, 4, 16 }, { 1021, 10, 5, 0 }, { 65533, 16, 5, 0 },
  { 268435453, 28, 5, 0 }, { 134217725, 27, 5, 0 }, { 67108861, 26, 5, 0 },
  { 33554429, 25, 5, 0 }, { 16777213, 24, 5, 0 },
| 3437 | }; |
| 3438 | |
| 3439 | /* Read a zstd Huffman table and build the decoding table in *TABLE, reading |
| 3440 | and updating *PPIN. This sets *PTABLE_BITS to the number of bits of the |
   table, such that the table length is 1 << *PTABLE_BITS.  ZDEBUG_TABLE is
| 3442 | scratch space; it must be enough for 512 uint16_t values + 256 32-bit values |
| 3443 | (2048 bytes). Returns 1 on success, 0 on error. */ |
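/* A worked sketch of the table encoding built below (peek/consume are
   informal shorthand for the bit fetching done by the callers, not real
   functions): each 16-bit entry of TABLE packs the decoded byte in the high
   8 bits and the number of bits consumed in the low 8 bits, so one decode
   step is roughly

     t = table[peek (table_bits)];
     *out++ = t >> 8;
     consume (t & 0xff);

   A symbol of weight W fills 1 << (W - 1) consecutive entries and consumes
   table_bits + 1 - W bits.  */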
| 3444 | |
| 3445 | static int |
| 3446 | elf_zstd_read_huff (const unsigned char **ppin, const unsigned char *pinend, |
| 3447 | uint16_t *zdebug_table, uint16_t *table, int *ptable_bits) |
| 3448 | { |
| 3449 | const unsigned char *pin; |
| 3450 | unsigned char hdr; |
| 3451 | unsigned char *weights; |
| 3452 | size_t count; |
| 3453 | uint32_t *weight_mark; |
| 3454 | size_t i; |
| 3455 | uint32_t weight_mask; |
| 3456 | size_t table_bits; |
| 3457 | |
| 3458 | pin = *ppin; |
| 3459 | if (unlikely (pin >= pinend)) |
| 3460 | { |
| 3461 | elf_uncompress_failed (); |
| 3462 | return 0; |
| 3463 | } |
| 3464 | hdr = *pin; |
| 3465 | ++pin; |
| 3466 | |
| 3467 | weights = (unsigned char *) zdebug_table; |
| 3468 | |
| 3469 | if (hdr < 128) |
| 3470 | { |
| 3471 | /* Table is compressed using FSE. */ |
| 3472 | |
| 3473 | struct elf_zstd_fse_entry *fse_table; |
| 3474 | int fse_table_bits; |
| 3475 | uint16_t *scratch; |
| 3476 | const unsigned char *pfse; |
| 3477 | const unsigned char *pback; |
| 3478 | uint64_t val; |
| 3479 | unsigned int bits; |
| 3480 | unsigned int state1, state2; |
| 3481 | |
| 3482 | /* SCRATCH is used temporarily by elf_zstd_read_fse. It overlaps |
| 3483 | WEIGHTS. */ |
| 3484 | scratch = zdebug_table; |
| 3485 | fse_table = (struct elf_zstd_fse_entry *) (scratch + 512); |
| 3486 | fse_table_bits = 6; |
| 3487 | |
| 3488 | pfse = pin; |
      if (!elf_zstd_read_fse (&pfse, pinend, scratch, 255, fse_table,
                              &fse_table_bits))
| 3491 | return 0; |
| 3492 | |
| 3493 | if (unlikely (pin + hdr > pinend)) |
| 3494 | { |
| 3495 | elf_uncompress_failed (); |
| 3496 | return 0; |
| 3497 | } |
| 3498 | |
| 3499 | /* We no longer need SCRATCH. Start recording weights. We need up to |
| 3500 | 256 bytes of weights and 64 bytes of rank counts, so it won't overlap |
| 3501 | FSE_TABLE. */ |
| 3502 | |
| 3503 | pback = pin + hdr - 1; |
| 3504 | |
      if (!elf_fetch_backward_init (&pback, pfse, &val, &bits))
| 3506 | return 0; |
| 3507 | |
| 3508 | bits -= fse_table_bits; |
| 3509 | state1 = (val >> bits) & ((1U << fse_table_bits) - 1); |
| 3510 | bits -= fse_table_bits; |
| 3511 | state2 = (val >> bits) & ((1U << fse_table_bits) - 1); |
| 3512 | |
| 3513 | /* There are two independent FSE streams, tracked by STATE1 and STATE2. |
| 3514 | We decode them alternately. */ |
| 3515 | |
| 3516 | count = 0; |
| 3517 | while (1) |
| 3518 | { |
| 3519 | struct elf_zstd_fse_entry *pt; |
| 3520 | uint64_t v; |
| 3521 | |
| 3522 | pt = &fse_table[state1]; |
| 3523 | |
| 3524 | if (unlikely (pin < pinend) && bits < pt->bits) |
| 3525 | { |
| 3526 | if (unlikely (count >= 254)) |
| 3527 | { |
| 3528 | elf_uncompress_failed (); |
| 3529 | return 0; |
| 3530 | } |
| 3531 | weights[count] = (unsigned char) pt->symbol; |
| 3532 | weights[count + 1] = (unsigned char) fse_table[state2].symbol; |
| 3533 | count += 2; |
| 3534 | break; |
| 3535 | } |
| 3536 | |
| 3537 | if (unlikely (pt->bits == 0)) |
| 3538 | v = 0; |
| 3539 | else |
| 3540 | { |
              if (!elf_fetch_bits_backward (&pback, pfse, &val, &bits))
| 3542 | return 0; |
| 3543 | |
| 3544 | bits -= pt->bits; |
| 3545 | v = (val >> bits) & (((uint64_t)1 << pt->bits) - 1); |
| 3546 | } |
| 3547 | |
| 3548 | state1 = pt->base + v; |
| 3549 | |
| 3550 | if (unlikely (count >= 255)) |
| 3551 | { |
| 3552 | elf_uncompress_failed (); |
| 3553 | return 0; |
| 3554 | } |
| 3555 | |
| 3556 | weights[count] = pt->symbol; |
| 3557 | ++count; |
| 3558 | |
| 3559 | pt = &fse_table[state2]; |
| 3560 | |
| 3561 | if (unlikely (pin < pinend && bits < pt->bits)) |
| 3562 | { |
| 3563 | if (unlikely (count >= 254)) |
| 3564 | { |
| 3565 | elf_uncompress_failed (); |
| 3566 | return 0; |
| 3567 | } |
| 3568 | weights[count] = (unsigned char) pt->symbol; |
| 3569 | weights[count + 1] = (unsigned char) fse_table[state1].symbol; |
| 3570 | count += 2; |
| 3571 | break; |
| 3572 | } |
| 3573 | |
| 3574 | if (unlikely (pt->bits == 0)) |
| 3575 | v = 0; |
| 3576 | else |
| 3577 | { |
              if (!elf_fetch_bits_backward (&pback, pfse, &val, &bits))
| 3579 | return 0; |
| 3580 | |
| 3581 | bits -= pt->bits; |
| 3582 | v = (val >> bits) & (((uint64_t)1 << pt->bits) - 1); |
| 3583 | } |
| 3584 | |
| 3585 | state2 = pt->base + v; |
| 3586 | |
| 3587 | if (unlikely (count >= 255)) |
| 3588 | { |
| 3589 | elf_uncompress_failed (); |
| 3590 | return 0; |
| 3591 | } |
| 3592 | |
| 3593 | weights[count] = pt->symbol; |
| 3594 | ++count; |
| 3595 | } |
| 3596 | |
| 3597 | pin += hdr; |
| 3598 | } |
| 3599 | else |
| 3600 | { |
| 3601 | /* Table is not compressed. Each weight is 4 bits. */ |
| 3602 | |
| 3603 | count = hdr - 127; |
| 3604 | if (unlikely (pin + ((count + 1) / 2) >= pinend)) |
| 3605 | { |
| 3606 | elf_uncompress_failed (); |
| 3607 | return 0; |
| 3608 | } |
| 3609 | for (i = 0; i < count; i += 2) |
| 3610 | { |
| 3611 | unsigned char b; |
| 3612 | |
| 3613 | b = *pin; |
| 3614 | ++pin; |
| 3615 | weights[i] = b >> 4; |
| 3616 | weights[i + 1] = b & 0xf; |
| 3617 | } |
| 3618 | } |
| 3619 | |
| 3620 | weight_mark = (uint32_t *) (weights + 256); |
  memset (weight_mark, 0, 13 * sizeof (uint32_t));
| 3622 | weight_mask = 0; |
| 3623 | for (i = 0; i < count; ++i) |
| 3624 | { |
| 3625 | unsigned char w; |
| 3626 | |
| 3627 | w = weights[i]; |
| 3628 | if (unlikely (w > 12)) |
| 3629 | { |
| 3630 | elf_uncompress_failed (); |
| 3631 | return 0; |
| 3632 | } |
| 3633 | ++weight_mark[w]; |
| 3634 | if (w > 0) |
| 3635 | weight_mask += 1U << (w - 1); |
| 3636 | } |
| 3637 | if (unlikely (weight_mask == 0)) |
| 3638 | { |
| 3639 | elf_uncompress_failed (); |
| 3640 | return 0; |
| 3641 | } |
| 3642 | |
| 3643 | table_bits = 32 - __builtin_clz (weight_mask); |
| 3644 | if (unlikely (table_bits > 11)) |
| 3645 | { |
| 3646 | elf_uncompress_failed (); |
| 3647 | return 0; |
| 3648 | } |
| 3649 | |
| 3650 | /* Work out the last weight value, which is omitted because the weights must |
| 3651 | sum to a power of two. */ |
| 3652 | { |
| 3653 | uint32_t left; |
| 3654 | uint32_t high_bit; |
| 3655 | |
| 3656 | left = ((uint32_t)1 << table_bits) - weight_mask; |
| 3657 | if (left == 0) |
| 3658 | { |
| 3659 | elf_uncompress_failed (); |
| 3660 | return 0; |
| 3661 | } |
| 3662 | high_bit = 31 - __builtin_clz (left); |
| 3663 | if (((uint32_t)1 << high_bit) != left) |
| 3664 | { |
| 3665 | elf_uncompress_failed (); |
| 3666 | return 0; |
| 3667 | } |
| 3668 | |
| 3669 | if (unlikely (count >= 256)) |
| 3670 | { |
| 3671 | elf_uncompress_failed (); |
| 3672 | return 0; |
| 3673 | } |
| 3674 | |
| 3675 | weights[count] = high_bit + 1; |
| 3676 | ++count; |
| 3677 | ++weight_mark[high_bit + 1]; |
| 3678 | } |
| 3679 | |
| 3680 | if (weight_mark[1] < 2 || (weight_mark[1] & 1) != 0) |
| 3681 | { |
| 3682 | elf_uncompress_failed (); |
| 3683 | return 0; |
| 3684 | } |
| 3685 | |
| 3686 | /* Change WEIGHT_MARK from a count of weights to the index of the first |
| 3687 | symbol for that weight. We shift the indexes to also store how many we |
| 3688 | have seen so far, below. */ |
| 3689 | { |
| 3690 | uint32_t next; |
| 3691 | |
| 3692 | next = 0; |
| 3693 | for (i = 0; i < table_bits; ++i) |
| 3694 | { |
| 3695 | uint32_t cur; |
| 3696 | |
| 3697 | cur = next; |
| 3698 | next += weight_mark[i + 1] << i; |
| 3699 | weight_mark[i + 1] = cur; |
| 3700 | } |
| 3701 | } |
| 3702 | |
| 3703 | for (i = 0; i < count; ++i) |
| 3704 | { |
| 3705 | unsigned char weight; |
| 3706 | uint32_t length; |
| 3707 | uint16_t tval; |
| 3708 | size_t start; |
| 3709 | uint32_t j; |
| 3710 | |
| 3711 | weight = weights[i]; |
| 3712 | if (weight == 0) |
| 3713 | continue; |
| 3714 | |
| 3715 | length = 1U << (weight - 1); |
| 3716 | tval = (i << 8) | (table_bits + 1 - weight); |
| 3717 | start = weight_mark[weight]; |
| 3718 | for (j = 0; j < length; ++j) |
| 3719 | table[start + j] = tval; |
| 3720 | weight_mark[weight] += length; |
| 3721 | } |
| 3722 | |
| 3723 | *ppin = pin; |
| 3724 | *ptable_bits = (int)table_bits; |
| 3725 | |
| 3726 | return 1; |
| 3727 | } |
| 3728 | |
| 3729 | /* Read and decompress the literals and store them ending at POUTEND. This |
| 3730 | works because we are going to use all the literals in the output, so they |
   must fit into the output buffer.  HUFFMAN_TABLE and PHUFFMAN_TABLE_BITS
| 3732 | store the Huffman table across calls. SCRATCH is used to read a Huffman |
| 3733 | table. Store the start of the decompressed literals in *PPLIT. Update |
| 3734 | *PPIN. Return 1 on success, 0 on error. */ |
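/* (A note on the header parsed below, following RFC 8878: the low two bits
   of the first byte give the literals block type (0 raw, 1 RLE, 2 compressed
   with a new Huffman tree, 3 compressed reusing the previous tree), and the
   next two bits select how many bytes encode the regenerated and compressed
   sizes.)  */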
| 3735 | |
| 3736 | static int |
| 3737 | elf_zstd_read_literals (const unsigned char **ppin, |
| 3738 | const unsigned char *pinend, |
| 3739 | unsigned char *pout, |
| 3740 | unsigned char *poutend, |
| 3741 | uint16_t *scratch, |
| 3742 | uint16_t *huffman_table, |
| 3743 | int *phuffman_table_bits, |
| 3744 | unsigned char **pplit) |
| 3745 | { |
| 3746 | const unsigned char *pin; |
| 3747 | unsigned char *plit; |
| 3748 | unsigned char hdr; |
| 3749 | uint32_t regenerated_size; |
| 3750 | uint32_t compressed_size; |
| 3751 | int streams; |
| 3752 | uint32_t total_streams_size; |
| 3753 | unsigned int huffman_table_bits; |
| 3754 | uint64_t huffman_mask; |
| 3755 | |
| 3756 | pin = *ppin; |
| 3757 | if (unlikely (pin >= pinend)) |
| 3758 | { |
| 3759 | elf_uncompress_failed (); |
| 3760 | return 0; |
| 3761 | } |
| 3762 | hdr = *pin; |
| 3763 | ++pin; |
| 3764 | |
| 3765 | if ((hdr & 3) == 0 || (hdr & 3) == 1) |
| 3766 | { |
| 3767 | int raw; |
| 3768 | |
| 3769 | /* Raw_Literals_Block or RLE_Literals_Block */ |
| 3770 | |
| 3771 | raw = (hdr & 3) == 0; |
| 3772 | |
| 3773 | switch ((hdr >> 2) & 3) |
| 3774 | { |
| 3775 | case 0: case 2: |
| 3776 | regenerated_size = hdr >> 3; |
| 3777 | break; |
| 3778 | case 1: |
| 3779 | if (unlikely (pin >= pinend)) |
| 3780 | { |
| 3781 | elf_uncompress_failed (); |
| 3782 | return 0; |
| 3783 | } |
| 3784 | regenerated_size = (hdr >> 4) + ((uint32_t)(*pin) << 4); |
| 3785 | ++pin; |
| 3786 | break; |
| 3787 | case 3: |
| 3788 | if (unlikely (pin + 1 >= pinend)) |
| 3789 | { |
| 3790 | elf_uncompress_failed (); |
| 3791 | return 0; |
| 3792 | } |
| 3793 | regenerated_size = ((hdr >> 4) |
| 3794 | + ((uint32_t)*pin << 4) |
| 3795 | + ((uint32_t)pin[1] << 12)); |
| 3796 | pin += 2; |
| 3797 | break; |
| 3798 | default: |
| 3799 | elf_uncompress_failed (); |
| 3800 | return 0; |
| 3801 | } |
| 3802 | |
| 3803 | if (unlikely ((size_t)(poutend - pout) < regenerated_size)) |
| 3804 | { |
| 3805 | elf_uncompress_failed (); |
| 3806 | return 0; |
| 3807 | } |
| 3808 | |
| 3809 | plit = poutend - regenerated_size; |
| 3810 | |
| 3811 | if (raw) |
| 3812 | { |
| 3813 | if (unlikely (pin + regenerated_size >= pinend)) |
| 3814 | { |
| 3815 | elf_uncompress_failed (); |
| 3816 | return 0; |
| 3817 | } |
          memcpy (plit, pin, regenerated_size);
| 3819 | pin += regenerated_size; |
| 3820 | } |
| 3821 | else |
| 3822 | { |
| 3823 | if (pin >= pinend) |
| 3824 | { |
| 3825 | elf_uncompress_failed (); |
| 3826 | return 0; |
| 3827 | } |
          memset (plit, *pin, regenerated_size);
| 3829 | ++pin; |
| 3830 | } |
| 3831 | |
| 3832 | *ppin = pin; |
| 3833 | *pplit = plit; |
| 3834 | |
| 3835 | return 1; |
| 3836 | } |
| 3837 | |
| 3838 | /* Compressed_Literals_Block or Treeless_Literals_Block */ |
| 3839 | |
| 3840 | switch ((hdr >> 2) & 3) |
| 3841 | { |
| 3842 | case 0: case 1: |
| 3843 | if (unlikely (pin + 1 >= pinend)) |
| 3844 | { |
| 3845 | elf_uncompress_failed (); |
| 3846 | return 0; |
| 3847 | } |
| 3848 | regenerated_size = (hdr >> 4) | ((uint32_t)(*pin & 0x3f) << 4); |
| 3849 | compressed_size = (uint32_t)*pin >> 6 | ((uint32_t)pin[1] << 2); |
| 3850 | pin += 2; |
| 3851 | streams = ((hdr >> 2) & 3) == 0 ? 1 : 4; |
| 3852 | break; |
| 3853 | case 2: |
| 3854 | if (unlikely (pin + 2 >= pinend)) |
| 3855 | { |
| 3856 | elf_uncompress_failed (); |
| 3857 | return 0; |
| 3858 | } |
| 3859 | regenerated_size = (((uint32_t)hdr >> 4) |
| 3860 | | ((uint32_t)*pin << 4) |
| 3861 | | (((uint32_t)pin[1] & 3) << 12)); |
| 3862 | compressed_size = (((uint32_t)pin[1] >> 2) |
| 3863 | | ((uint32_t)pin[2] << 6)); |
| 3864 | pin += 3; |
| 3865 | streams = 4; |
| 3866 | break; |
| 3867 | case 3: |
| 3868 | if (unlikely (pin + 3 >= pinend)) |
| 3869 | { |
| 3870 | elf_uncompress_failed (); |
| 3871 | return 0; |
| 3872 | } |
| 3873 | regenerated_size = (((uint32_t)hdr >> 4) |
| 3874 | | ((uint32_t)*pin << 4) |
| 3875 | | (((uint32_t)pin[1] & 0x3f) << 12)); |
| 3876 | compressed_size = (((uint32_t)pin[1] >> 6) |
| 3877 | | ((uint32_t)pin[2] << 2) |
| 3878 | | ((uint32_t)pin[3] << 10)); |
| 3879 | pin += 4; |
| 3880 | streams = 4; |
| 3881 | break; |
| 3882 | default: |
| 3883 | elf_uncompress_failed (); |
| 3884 | return 0; |
| 3885 | } |
| 3886 | |
| 3887 | if (unlikely (pin + compressed_size > pinend)) |
| 3888 | { |
| 3889 | elf_uncompress_failed (); |
| 3890 | return 0; |
| 3891 | } |
| 3892 | |
| 3893 | pinend = pin + compressed_size; |
| 3894 | *ppin = pinend; |
| 3895 | |
| 3896 | if (unlikely ((size_t)(poutend - pout) < regenerated_size)) |
| 3897 | { |
| 3898 | elf_uncompress_failed (); |
| 3899 | return 0; |
| 3900 | } |
| 3901 | |
| 3902 | plit = poutend - regenerated_size; |
| 3903 | |
| 3904 | *pplit = plit; |
| 3905 | |
| 3906 | total_streams_size = compressed_size; |
| 3907 | if ((hdr & 3) == 2) |
| 3908 | { |
| 3909 | const unsigned char *ptable; |
| 3910 | |
| 3911 | /* Compressed_Literals_Block. Read Huffman tree. */ |
| 3912 | |
| 3913 | ptable = pin; |
      if (!elf_zstd_read_huff (&ptable, pinend, scratch, huffman_table,
                               phuffman_table_bits))
| 3916 | return 0; |
| 3917 | |
| 3918 | if (unlikely (total_streams_size < (size_t)(ptable - pin))) |
| 3919 | { |
| 3920 | elf_uncompress_failed (); |
| 3921 | return 0; |
| 3922 | } |
| 3923 | |
| 3924 | total_streams_size -= ptable - pin; |
| 3925 | pin = ptable; |
| 3926 | } |
| 3927 | else |
| 3928 | { |
| 3929 | /* Treeless_Literals_Block. Reuse previous Huffman tree. */ |
| 3930 | if (unlikely (*phuffman_table_bits == 0)) |
| 3931 | { |
| 3932 | elf_uncompress_failed (); |
| 3933 | return 0; |
| 3934 | } |
| 3935 | } |
| 3936 | |
| 3937 | /* Decompress COMPRESSED_SIZE bytes of data at PIN using the huffman table, |
| 3938 | storing REGENERATED_SIZE bytes of decompressed data at PLIT. */ |
| 3939 | |
| 3940 | huffman_table_bits = (unsigned int)*phuffman_table_bits; |
| 3941 | huffman_mask = ((uint64_t)1 << huffman_table_bits) - 1; |
| 3942 | |
| 3943 | if (streams == 1) |
| 3944 | { |
| 3945 | const unsigned char *pback; |
| 3946 | const unsigned char *pbackend; |
| 3947 | uint64_t val; |
| 3948 | unsigned int bits; |
| 3949 | uint32_t i; |
| 3950 | |
| 3951 | pback = pin + total_streams_size - 1; |
| 3952 | pbackend = pin; |
      if (!elf_fetch_backward_init (&pback, pbackend, &val, &bits))
| 3954 | return 0; |
| 3955 | |
| 3956 | /* This is one of the inner loops of the decompression algorithm, so we |
| 3957 | put some effort into optimization. We can't get more than 64 bytes |
| 3958 | from a single call to elf_fetch_bits_backward, and we can't subtract |
| 3959 | more than 11 bits at a time. */ |
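      /* (Each literal consumes at least 1 and at most 11 bits, so a 64-bit
         refill yields at most 64 literals, and having at least 33 bits
         available lets us decode three literals without rechecking.)  */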
| 3960 | |
| 3961 | if (regenerated_size >= 64) |
| 3962 | { |
| 3963 | unsigned char *plitstart; |
| 3964 | unsigned char *plitstop; |
| 3965 | |
| 3966 | plitstart = plit; |
| 3967 | plitstop = plit + regenerated_size - 64; |
| 3968 | while (plit < plitstop) |
| 3969 | { |
| 3970 | uint16_t t; |
| 3971 | |
              if (!elf_fetch_bits_backward (&pback, pbackend, &val, &bits))
| 3973 | return 0; |
| 3974 | |
| 3975 | if (bits < 16) |
| 3976 | break; |
| 3977 | |
| 3978 | while (bits >= 33) |
| 3979 | { |
| 3980 | t = huffman_table[(val >> (bits - huffman_table_bits)) |
| 3981 | & huffman_mask]; |
| 3982 | *plit = t >> 8; |
| 3983 | ++plit; |
| 3984 | bits -= t & 0xff; |
| 3985 | |
| 3986 | t = huffman_table[(val >> (bits - huffman_table_bits)) |
| 3987 | & huffman_mask]; |
| 3988 | *plit = t >> 8; |
| 3989 | ++plit; |
| 3990 | bits -= t & 0xff; |
| 3991 | |
| 3992 | t = huffman_table[(val >> (bits - huffman_table_bits)) |
| 3993 | & huffman_mask]; |
| 3994 | *plit = t >> 8; |
| 3995 | ++plit; |
| 3996 | bits -= t & 0xff; |
| 3997 | } |
| 3998 | |
| 3999 | while (bits > 11) |
| 4000 | { |
| 4001 | t = huffman_table[(val >> (bits - huffman_table_bits)) |
| 4002 | & huffman_mask]; |
| 4003 | *plit = t >> 8; |
| 4004 | ++plit; |
| 4005 | bits -= t & 0xff; |
| 4006 | } |
| 4007 | } |
| 4008 | |
| 4009 | regenerated_size -= plit - plitstart; |
| 4010 | } |
| 4011 | |
| 4012 | for (i = 0; i < regenerated_size; ++i) |
| 4013 | { |
| 4014 | uint16_t t; |
| 4015 | |
          if (!elf_fetch_bits_backward (&pback, pbackend, &val, &bits))
| 4017 | return 0; |
| 4018 | |
| 4019 | if (unlikely (bits < huffman_table_bits)) |
| 4020 | { |
| 4021 | t = huffman_table[(val << (huffman_table_bits - bits)) |
| 4022 | & huffman_mask]; |
| 4023 | if (unlikely (bits < (t & 0xff))) |
| 4024 | { |
| 4025 | elf_uncompress_failed (); |
| 4026 | return 0; |
| 4027 | } |
| 4028 | } |
| 4029 | else |
| 4030 | t = huffman_table[(val >> (bits - huffman_table_bits)) |
| 4031 | & huffman_mask]; |
| 4032 | |
| 4033 | *plit = t >> 8; |
| 4034 | ++plit; |
| 4035 | bits -= t & 0xff; |
| 4036 | } |
| 4037 | |
| 4038 | return 1; |
| 4039 | } |
| 4040 | |
| 4041 | { |
| 4042 | uint32_t stream_size1, stream_size2, stream_size3, stream_size4; |
| 4043 | uint32_t tot; |
| 4044 | const unsigned char *pback1, *pback2, *pback3, *pback4; |
| 4045 | const unsigned char *pbackend1, *pbackend2, *pbackend3, *pbackend4; |
| 4046 | uint64_t val1, val2, val3, val4; |
| 4047 | unsigned int bits1, bits2, bits3, bits4; |
| 4048 | unsigned char *plit1, *plit2, *plit3, *plit4; |
| 4049 | uint32_t regenerated_stream_size; |
| 4050 | uint32_t regenerated_stream_size4; |
| 4051 | uint16_t t1, t2, t3, t4; |
| 4052 | uint32_t i; |
| 4053 | uint32_t limit; |
| 4054 | |
| 4055 | /* Read jump table. */ |
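    /* (The jump table is 6 bytes: three little-endian 16-bit sizes for the
       first three literal streams; the fourth stream gets whatever remains
       of the compressed literal data.)  */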
| 4056 | if (unlikely (pin + 5 >= pinend)) |
| 4057 | { |
| 4058 | elf_uncompress_failed (); |
| 4059 | return 0; |
| 4060 | } |
| 4061 | stream_size1 = (uint32_t)*pin | ((uint32_t)pin[1] << 8); |
| 4062 | pin += 2; |
| 4063 | stream_size2 = (uint32_t)*pin | ((uint32_t)pin[1] << 8); |
| 4064 | pin += 2; |
| 4065 | stream_size3 = (uint32_t)*pin | ((uint32_t)pin[1] << 8); |
| 4066 | pin += 2; |
| 4067 | tot = stream_size1 + stream_size2 + stream_size3; |
| 4068 | if (unlikely (tot > total_streams_size - 6)) |
| 4069 | { |
| 4070 | elf_uncompress_failed (); |
| 4071 | return 0; |
| 4072 | } |
| 4073 | stream_size4 = total_streams_size - 6 - tot; |
| 4074 | |
| 4075 | pback1 = pin + stream_size1 - 1; |
| 4076 | pbackend1 = pin; |
| 4077 | |
| 4078 | pback2 = pback1 + stream_size2; |
| 4079 | pbackend2 = pback1 + 1; |
| 4080 | |
| 4081 | pback3 = pback2 + stream_size3; |
| 4082 | pbackend3 = pback2 + 1; |
| 4083 | |
| 4084 | pback4 = pback3 + stream_size4; |
| 4085 | pbackend4 = pback3 + 1; |
| 4086 | |
    if (!elf_fetch_backward_init (&pback1, pbackend1, &val1, &bits1))
      return 0;
    if (!elf_fetch_backward_init (&pback2, pbackend2, &val2, &bits2))
      return 0;
    if (!elf_fetch_backward_init (&pback3, pbackend3, &val3, &bits3))
      return 0;
    if (!elf_fetch_backward_init (&pback4, pbackend4, &val4, &bits4))
| 4094 | return 0; |
| 4095 | |
| 4096 | regenerated_stream_size = (regenerated_size + 3) / 4; |
| 4097 | |
| 4098 | plit1 = plit; |
| 4099 | plit2 = plit1 + regenerated_stream_size; |
| 4100 | plit3 = plit2 + regenerated_stream_size; |
| 4101 | plit4 = plit3 + regenerated_stream_size; |
| 4102 | |
| 4103 | regenerated_stream_size4 = regenerated_size - regenerated_stream_size * 3; |
| 4104 | |
| 4105 | /* We can't get more than 64 literal bytes from a single call to |
     elf_fetch_bits_backward.  The fourth stream can be up to 3 bytes
     less, so use that size to set the limit.  */
| 4108 | |
| 4109 | limit = regenerated_stream_size4 <= 64 ? 0 : regenerated_stream_size4 - 64; |
| 4110 | i = 0; |
| 4111 | while (i < limit) |
| 4112 | { |
        if (!elf_fetch_bits_backward (&pback1, pbackend1, &val1, &bits1))
          return 0;
        if (!elf_fetch_bits_backward (&pback2, pbackend2, &val2, &bits2))
          return 0;
        if (!elf_fetch_bits_backward (&pback3, pbackend3, &val3, &bits3))
          return 0;
        if (!elf_fetch_bits_backward (&pback4, pbackend4, &val4, &bits4))
| 4120 | return 0; |
| 4121 | |
| 4122 | /* We can't subtract more than 11 bits at a time. */ |
| 4123 | |
| 4124 | do |
| 4125 | { |
| 4126 | t1 = huffman_table[(val1 >> (bits1 - huffman_table_bits)) |
| 4127 | & huffman_mask]; |
| 4128 | t2 = huffman_table[(val2 >> (bits2 - huffman_table_bits)) |
| 4129 | & huffman_mask]; |
| 4130 | t3 = huffman_table[(val3 >> (bits3 - huffman_table_bits)) |
| 4131 | & huffman_mask]; |
| 4132 | t4 = huffman_table[(val4 >> (bits4 - huffman_table_bits)) |
| 4133 | & huffman_mask]; |
| 4134 | |
| 4135 | *plit1 = t1 >> 8; |
| 4136 | ++plit1; |
| 4137 | bits1 -= t1 & 0xff; |
| 4138 | |
| 4139 | *plit2 = t2 >> 8; |
| 4140 | ++plit2; |
| 4141 | bits2 -= t2 & 0xff; |
| 4142 | |
| 4143 | *plit3 = t3 >> 8; |
| 4144 | ++plit3; |
| 4145 | bits3 -= t3 & 0xff; |
| 4146 | |
| 4147 | *plit4 = t4 >> 8; |
| 4148 | ++plit4; |
| 4149 | bits4 -= t4 & 0xff; |
| 4150 | |
| 4151 | ++i; |
| 4152 | } |
| 4153 | while (bits1 > 11 && bits2 > 11 && bits3 > 11 && bits4 > 11); |
| 4154 | } |
| 4155 | |
| 4156 | while (i < regenerated_stream_size) |
| 4157 | { |
| 4158 | int use4; |
| 4159 | |
| 4160 | use4 = i < regenerated_stream_size4; |
| 4161 | |
        if (!elf_fetch_bits_backward (&pback1, pbackend1, &val1, &bits1))
          return 0;
        if (!elf_fetch_bits_backward (&pback2, pbackend2, &val2, &bits2))
          return 0;
        if (!elf_fetch_bits_backward (&pback3, pbackend3, &val3, &bits3))
          return 0;
        if (use4)
          {
            if (!elf_fetch_bits_backward (&pback4, pbackend4, &val4, &bits4))
| 4171 | return 0; |
| 4172 | } |
| 4173 | |
| 4174 | if (unlikely (bits1 < huffman_table_bits)) |
| 4175 | { |
| 4176 | t1 = huffman_table[(val1 << (huffman_table_bits - bits1)) |
| 4177 | & huffman_mask]; |
| 4178 | if (unlikely (bits1 < (t1 & 0xff))) |
| 4179 | { |
| 4180 | elf_uncompress_failed (); |
| 4181 | return 0; |
| 4182 | } |
| 4183 | } |
| 4184 | else |
| 4185 | t1 = huffman_table[(val1 >> (bits1 - huffman_table_bits)) |
| 4186 | & huffman_mask]; |
| 4187 | |
| 4188 | if (unlikely (bits2 < huffman_table_bits)) |
| 4189 | { |
| 4190 | t2 = huffman_table[(val2 << (huffman_table_bits - bits2)) |
| 4191 | & huffman_mask]; |
| 4192 | if (unlikely (bits2 < (t2 & 0xff))) |
| 4193 | { |
| 4194 | elf_uncompress_failed (); |
| 4195 | return 0; |
| 4196 | } |
| 4197 | } |
| 4198 | else |
| 4199 | t2 = huffman_table[(val2 >> (bits2 - huffman_table_bits)) |
| 4200 | & huffman_mask]; |
| 4201 | |
| 4202 | if (unlikely (bits3 < huffman_table_bits)) |
| 4203 | { |
| 4204 | t3 = huffman_table[(val3 << (huffman_table_bits - bits3)) |
| 4205 | & huffman_mask]; |
| 4206 | if (unlikely (bits3 < (t3 & 0xff))) |
| 4207 | { |
| 4208 | elf_uncompress_failed (); |
| 4209 | return 0; |
| 4210 | } |
| 4211 | } |
| 4212 | else |
| 4213 | t3 = huffman_table[(val3 >> (bits3 - huffman_table_bits)) |
| 4214 | & huffman_mask]; |
| 4215 | |
| 4216 | if (use4) |
| 4217 | { |
| 4218 | if (unlikely (bits4 < huffman_table_bits)) |
| 4219 | { |
| 4220 | t4 = huffman_table[(val4 << (huffman_table_bits - bits4)) |
| 4221 | & huffman_mask]; |
| 4222 | if (unlikely (bits4 < (t4 & 0xff))) |
| 4223 | { |
| 4224 | elf_uncompress_failed (); |
| 4225 | return 0; |
| 4226 | } |
| 4227 | } |
| 4228 | else |
| 4229 | t4 = huffman_table[(val4 >> (bits4 - huffman_table_bits)) |
| 4230 | & huffman_mask]; |
| 4231 | |
| 4232 | *plit4 = t4 >> 8; |
| 4233 | ++plit4; |
| 4234 | bits4 -= t4 & 0xff; |
| 4235 | } |
| 4236 | |
| 4237 | *plit1 = t1 >> 8; |
| 4238 | ++plit1; |
| 4239 | bits1 -= t1 & 0xff; |
| 4240 | |
| 4241 | *plit2 = t2 >> 8; |
| 4242 | ++plit2; |
| 4243 | bits2 -= t2 & 0xff; |
| 4244 | |
| 4245 | *plit3 = t3 >> 8; |
| 4246 | ++plit3; |
| 4247 | bits3 -= t3 & 0xff; |
| 4248 | |
| 4249 | ++i; |
| 4250 | } |
| 4251 | } |
| 4252 | |
| 4253 | return 1; |
| 4254 | } |
| 4255 | |
| 4256 | /* The information used to decompress a sequence code, which can be a literal |
| 4257 | length, an offset, or a match length. */ |
| 4258 | |
| 4259 | struct elf_zstd_seq_decode |
| 4260 | { |
| 4261 | const struct elf_zstd_fse_baseline_entry *table; |
| 4262 | int table_bits; |
| 4263 | }; |
| 4264 | |
| 4265 | /* Unpack a sequence code compression mode. */ |
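/* (The MODE values follow RFC 8878: 0 uses the predefined table passed in
   PREDEF, 1 is an RLE mode in which a single symbol byte follows, 2 reads an
   FSE table from the input, and 3 reuses the table from a previous
   block.)  */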
| 4266 | |
| 4267 | static int |
| 4268 | elf_zstd_unpack_seq_decode (int mode, |
| 4269 | const unsigned char **ppin, |
| 4270 | const unsigned char *pinend, |
| 4271 | const struct elf_zstd_fse_baseline_entry *predef, |
| 4272 | int predef_bits, |
| 4273 | uint16_t *scratch, |
| 4274 | int maxidx, |
| 4275 | struct elf_zstd_fse_baseline_entry *table, |
| 4276 | int table_bits, |
| 4277 | int (*conv)(const struct elf_zstd_fse_entry *, |
| 4278 | int, |
| 4279 | struct elf_zstd_fse_baseline_entry *), |
| 4280 | struct elf_zstd_seq_decode *decode) |
| 4281 | { |
| 4282 | switch (mode) |
| 4283 | { |
| 4284 | case 0: |
| 4285 | decode->table = predef; |
| 4286 | decode->table_bits = predef_bits; |
| 4287 | break; |
| 4288 | |
| 4289 | case 1: |
| 4290 | { |
| 4291 | struct elf_zstd_fse_entry entry; |
| 4292 | |
| 4293 | if (unlikely (*ppin >= pinend)) |
| 4294 | { |
| 4295 | elf_uncompress_failed (); |
| 4296 | return 0; |
| 4297 | } |
| 4298 | entry.symbol = **ppin; |
| 4299 | ++*ppin; |
| 4300 | entry.bits = 0; |
| 4301 | entry.base = 0; |
| 4302 | decode->table_bits = 0; |
        if (!conv (&entry, 0, table))
          return 0;
        decode->table = table;
      }
| 4306 | break; |
| 4307 | |
| 4308 | case 2: |
| 4309 | { |
| 4310 | struct elf_zstd_fse_entry *fse_table; |
| 4311 | |
| 4312 | /* We use the same space for the simple FSE table and the baseline |
| 4313 | table. */ |
| 4314 | fse_table = (struct elf_zstd_fse_entry *)table; |
| 4315 | decode->table_bits = table_bits; |
        if (!elf_zstd_read_fse (ppin, pinend, scratch, maxidx, fse_table,
                                &decode->table_bits))
| 4318 | return 0; |
| 4319 | if (!conv (fse_table, decode->table_bits, table)) |
| 4320 | return 0; |
| 4321 | decode->table = table; |
| 4322 | } |
| 4323 | break; |
| 4324 | |
| 4325 | case 3: |
| 4326 | if (unlikely (decode->table_bits == -1)) |
| 4327 | { |
| 4328 | elf_uncompress_failed (); |
| 4329 | return 0; |
| 4330 | } |
| 4331 | break; |
| 4332 | |
| 4333 | default: |
| 4334 | elf_uncompress_failed (); |
| 4335 | return 0; |
| 4336 | } |
| 4337 | |
| 4338 | return 1; |
| 4339 | } |
| 4340 | |
| 4341 | /* Decompress a zstd stream from PIN/SIN to POUT/SOUT. Code based on RFC 8878. |
| 4342 | Return 1 on success, 0 on error. */ |
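/* (Frame layout sketch, following RFC 8878: the 4-byte magic number, a frame
   header, a sequence of blocks, and an optional 4-byte checksum.  In the
   frame header descriptor byte read below, bits 6-7 select the size of the
   frame content size field, bit 5 is the single segment flag, bit 2 is the
   content checksum flag, and bits 0-1 describe a dictionary ID, which we do
   not support.)  */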
| 4343 | |
| 4344 | static int |
| 4345 | elf_zstd_decompress (const unsigned char *pin, size_t sin, |
| 4346 | unsigned char *zdebug_table, unsigned char *pout, |
| 4347 | size_t sout) |
| 4348 | { |
| 4349 | const unsigned char *pinend; |
| 4350 | unsigned char *poutstart; |
| 4351 | unsigned char *poutend; |
| 4352 | struct elf_zstd_seq_decode literal_decode; |
| 4353 | struct elf_zstd_fse_baseline_entry *literal_fse_table; |
| 4354 | struct elf_zstd_seq_decode match_decode; |
| 4355 | struct elf_zstd_fse_baseline_entry *match_fse_table; |
| 4356 | struct elf_zstd_seq_decode offset_decode; |
| 4357 | struct elf_zstd_fse_baseline_entry *offset_fse_table; |
| 4358 | uint16_t *huffman_table; |
| 4359 | int huffman_table_bits; |
| 4360 | uint32_t repeated_offset1; |
| 4361 | uint32_t repeated_offset2; |
| 4362 | uint32_t repeated_offset3; |
| 4363 | uint16_t *scratch; |
| 4364 | unsigned char hdr; |
| 4365 | int has_checksum; |
| 4366 | uint64_t content_size; |
| 4367 | int last_block; |
| 4368 | |
| 4369 | pinend = pin + sin; |
| 4370 | poutstart = pout; |
| 4371 | poutend = pout + sout; |
| 4372 | |
| 4373 | literal_decode.table = NULL; |
| 4374 | literal_decode.table_bits = -1; |
| 4375 | literal_fse_table = ((struct elf_zstd_fse_baseline_entry *) |
| 4376 | (zdebug_table + ZSTD_TABLE_LITERAL_FSE_OFFSET)); |
| 4377 | |
| 4378 | match_decode.table = NULL; |
| 4379 | match_decode.table_bits = -1; |
| 4380 | match_fse_table = ((struct elf_zstd_fse_baseline_entry *) |
| 4381 | (zdebug_table + ZSTD_TABLE_MATCH_FSE_OFFSET)); |
| 4382 | |
| 4383 | offset_decode.table = NULL; |
| 4384 | offset_decode.table_bits = -1; |
| 4385 | offset_fse_table = ((struct elf_zstd_fse_baseline_entry *) |
| 4386 | (zdebug_table + ZSTD_TABLE_OFFSET_FSE_OFFSET)); |
| 4387 | huffman_table = ((uint16_t *) |
| 4388 | (zdebug_table + ZSTD_TABLE_HUFFMAN_OFFSET)); |
| 4389 | huffman_table_bits = 0; |
| 4390 | scratch = ((uint16_t *) |
| 4391 | (zdebug_table + ZSTD_TABLE_WORK_OFFSET)); |
| 4392 | |
| 4393 | repeated_offset1 = 1; |
| 4394 | repeated_offset2 = 4; |
| 4395 | repeated_offset3 = 8; |
| 4396 | |
| 4397 | if (unlikely (sin < 4)) |
| 4398 | { |
| 4399 | elf_uncompress_failed (); |
| 4400 | return 0; |
| 4401 | } |
| 4402 | |
| 4403 | /* These values are the zstd magic number. */ |
| 4404 | if (unlikely (pin[0] != 0x28 |
| 4405 | || pin[1] != 0xb5 |
| 4406 | || pin[2] != 0x2f |
| 4407 | || pin[3] != 0xfd)) |
| 4408 | { |
| 4409 | elf_uncompress_failed (); |
| 4410 | return 0; |
| 4411 | } |
| 4412 | |
| 4413 | pin += 4; |
| 4414 | |
| 4415 | if (unlikely (pin >= pinend)) |
| 4416 | { |
| 4417 | elf_uncompress_failed (); |
| 4418 | return 0; |
| 4419 | } |
| 4420 | |
| 4421 | hdr = *pin++; |
| 4422 | |
| 4423 | /* We expect a single frame. */ |
| 4424 | if (unlikely ((hdr & (1 << 5)) == 0)) |
| 4425 | { |
| 4426 | elf_uncompress_failed (); |
| 4427 | return 0; |
| 4428 | } |
| 4429 | /* Reserved bit must be zero. */ |
| 4430 | if (unlikely ((hdr & (1 << 3)) != 0)) |
| 4431 | { |
| 4432 | elf_uncompress_failed (); |
| 4433 | return 0; |
| 4434 | } |
| 4435 | /* We do not expect a dictionary. */ |
| 4436 | if (unlikely ((hdr & 3) != 0)) |
| 4437 | { |
| 4438 | elf_uncompress_failed (); |
| 4439 | return 0; |
| 4440 | } |
| 4441 | has_checksum = (hdr & (1 << 2)) != 0; |
| 4442 | switch (hdr >> 6) |
| 4443 | { |
| 4444 | case 0: |
| 4445 | if (unlikely (pin >= pinend)) |
| 4446 | { |
| 4447 | elf_uncompress_failed (); |
| 4448 | return 0; |
| 4449 | } |
| 4450 | content_size = (uint64_t) *pin++; |
| 4451 | break; |
| 4452 | case 1: |
| 4453 | if (unlikely (pin + 1 >= pinend)) |
| 4454 | { |
| 4455 | elf_uncompress_failed (); |
| 4456 | return 0; |
| 4457 | } |
| 4458 | content_size = (((uint64_t) pin[0]) | (((uint64_t) pin[1]) << 8)) + 256; |
| 4459 | pin += 2; |
| 4460 | break; |
| 4461 | case 2: |
| 4462 | if (unlikely (pin + 3 >= pinend)) |
| 4463 | { |
| 4464 | elf_uncompress_failed (); |
| 4465 | return 0; |
| 4466 | } |
| 4467 | content_size = ((uint64_t) pin[0] |
| 4468 | | (((uint64_t) pin[1]) << 8) |
| 4469 | | (((uint64_t) pin[2]) << 16) |
| 4470 | | (((uint64_t) pin[3]) << 24)); |
| 4471 | pin += 4; |
| 4472 | break; |
| 4473 | case 3: |
| 4474 | if (unlikely (pin + 7 >= pinend)) |
| 4475 | { |
| 4476 | elf_uncompress_failed (); |
| 4477 | return 0; |
| 4478 | } |
| 4479 | content_size = ((uint64_t) pin[0] |
| 4480 | | (((uint64_t) pin[1]) << 8) |
| 4481 | | (((uint64_t) pin[2]) << 16) |
| 4482 | | (((uint64_t) pin[3]) << 24) |
| 4483 | | (((uint64_t) pin[4]) << 32) |
| 4484 | | (((uint64_t) pin[5]) << 40) |
| 4485 | | (((uint64_t) pin[6]) << 48) |
| 4486 | | (((uint64_t) pin[7]) << 56)); |
| 4487 | pin += 8; |
| 4488 | break; |
| 4489 | default: |
| 4490 | elf_uncompress_failed (); |
| 4491 | return 0; |
| 4492 | } |
| 4493 | |
| 4494 | if (unlikely (content_size != (size_t) content_size |
| 4495 | || (size_t) content_size != sout)) |
| 4496 | { |
| 4497 | elf_uncompress_failed (); |
| 4498 | return 0; |
| 4499 | } |
| 4500 | |
| 4501 | last_block = 0; |
| 4502 | while (!last_block) |
| 4503 | { |
| 4504 | uint32_t block_hdr; |
| 4505 | int block_type; |
| 4506 | uint32_t block_size; |
| 4507 | |
| 4508 | if (unlikely (pin + 2 >= pinend)) |
| 4509 | { |
| 4510 | elf_uncompress_failed (); |
| 4511 | return 0; |
| 4512 | } |
| 4513 | block_hdr = ((uint32_t) pin[0] |
| 4514 | | (((uint32_t) pin[1]) << 8) |
| 4515 | | (((uint32_t) pin[2]) << 16)); |
| 4516 | pin += 3; |
| 4517 | |
| 4518 | last_block = block_hdr & 1; |
| 4519 | block_type = (block_hdr >> 1) & 3; |
| 4520 | block_size = block_hdr >> 3; |
| 4521 | |
| 4522 | switch (block_type) |
| 4523 | { |
| 4524 | case 0: |
| 4525 | /* Raw_Block */ |
| 4526 | if (unlikely ((size_t) block_size > (size_t) (pinend - pin))) |
| 4527 | { |
| 4528 | elf_uncompress_failed (); |
| 4529 | return 0; |
| 4530 | } |
| 4531 | if (unlikely ((size_t) block_size > (size_t) (poutend - pout))) |
| 4532 | { |
| 4533 | elf_uncompress_failed (); |
| 4534 | return 0; |
| 4535 | } |
          memcpy (pout, pin, block_size);
| 4537 | pout += block_size; |
| 4538 | pin += block_size; |
| 4539 | break; |
| 4540 | |
| 4541 | case 1: |
| 4542 | /* RLE_Block */ |
| 4543 | if (unlikely (pin >= pinend)) |
| 4544 | { |
| 4545 | elf_uncompress_failed (); |
| 4546 | return 0; |
| 4547 | } |
| 4548 | if (unlikely ((size_t) block_size > (size_t) (poutend - pout))) |
| 4549 | { |
| 4550 | elf_uncompress_failed (); |
| 4551 | return 0; |
| 4552 | } |
          memset (pout, *pin, block_size);
| 4554 | pout += block_size; |
| 4555 | pin++; |
| 4556 | break; |
| 4557 | |
| 4558 | case 2: |
| 4559 | { |
| 4560 | const unsigned char *pblockend; |
| 4561 | unsigned char *plitstack; |
| 4562 | unsigned char *plit; |
| 4563 | uint32_t literal_count; |
| 4564 | unsigned char seq_hdr; |
| 4565 | size_t seq_count; |
| 4566 | size_t seq; |
| 4567 | const unsigned char *pback; |
| 4568 | uint64_t val; |
| 4569 | unsigned int bits; |
| 4570 | unsigned int literal_state; |
| 4571 | unsigned int offset_state; |
| 4572 | unsigned int match_state; |
| 4573 | |
| 4574 | /* Compressed_Block */ |
| 4575 | if (unlikely ((size_t) block_size > (size_t) (pinend - pin))) |
| 4576 | { |
| 4577 | elf_uncompress_failed (); |
| 4578 | return 0; |
| 4579 | } |
| 4580 | |
| 4581 | pblockend = pin + block_size; |
| 4582 | |
| 4583 | /* Read the literals into the end of the output space, and leave |
| 4584 | PLIT pointing at them. */ |
| 4585 | |
            if (!elf_zstd_read_literals (&pin, pblockend, pout, poutend,
                                         scratch, huffman_table,
                                         &huffman_table_bits,
                                         &plitstack))
| 4590 | return 0; |
| 4591 | plit = plitstack; |
| 4592 | literal_count = poutend - plit; |
| 4593 | |
| 4594 | seq_hdr = *pin; |
| 4595 | pin++; |
| 4596 | if (seq_hdr < 128) |
| 4597 | seq_count = seq_hdr; |
| 4598 | else if (seq_hdr < 255) |
| 4599 | { |
| 4600 | if (unlikely (pin >= pinend)) |
| 4601 | { |
| 4602 | elf_uncompress_failed (); |
| 4603 | return 0; |
| 4604 | } |
| 4605 | seq_count = ((seq_hdr - 128) << 8) + *pin; |
| 4606 | pin++; |
| 4607 | } |
| 4608 | else |
| 4609 | { |
| 4610 | if (unlikely (pin + 1 >= pinend)) |
| 4611 | { |
| 4612 | elf_uncompress_failed (); |
| 4613 | return 0; |
| 4614 | } |
| 4615 | seq_count = *pin + (pin[1] << 8) + 0x7f00; |
| 4616 | pin += 2; |
| 4617 | } |
| 4618 | |
| 4619 | if (seq_count > 0) |
| 4620 | { |
| 4621 | int (*pfn)(const struct elf_zstd_fse_entry *, |
| 4622 | int, struct elf_zstd_fse_baseline_entry *); |
| 4623 | |
| 4624 | if (unlikely (pin >= pinend)) |
| 4625 | { |
| 4626 | elf_uncompress_failed (); |
| 4627 | return 0; |
| 4628 | } |
| 4629 | seq_hdr = *pin; |
| 4630 | ++pin; |
| 4631 | |
| 4632 | pfn = elf_zstd_make_literal_baseline_fse; |
                if (!elf_zstd_unpack_seq_decode ((seq_hdr >> 6) & 3,
                                                 &pin, pinend,
                                                 &elf_zstd_lit_table[0], 6,
                                                 scratch, 35,
                                                 literal_fse_table, 9, pfn,
                                                 &literal_decode))
| 4639 | return 0; |
| 4640 | |
| 4641 | pfn = elf_zstd_make_offset_baseline_fse; |
                if (!elf_zstd_unpack_seq_decode ((seq_hdr >> 4) & 3,
                                                 &pin, pinend,
                                                 &elf_zstd_offset_table[0], 5,
                                                 scratch, 31,
                                                 offset_fse_table, 8, pfn,
                                                 &offset_decode))
| 4648 | return 0; |
| 4649 | |
| 4650 | pfn = elf_zstd_make_match_baseline_fse; |
                if (!elf_zstd_unpack_seq_decode ((seq_hdr >> 2) & 3,
                                                 &pin, pinend,
                                                 &elf_zstd_match_table[0], 6,
                                                 scratch, 52,
                                                 match_fse_table, 9, pfn,
                                                 &match_decode))
| 4657 | return 0; |
| 4658 | } |
| 4659 | |
| 4660 | pback = pblockend - 1; |
            if (!elf_fetch_backward_init (&pback, pin, &val, &bits))
| 4662 | return 0; |
| 4663 | |
| 4664 | bits -= literal_decode.table_bits; |
| 4665 | literal_state = ((val >> bits) |
| 4666 | & ((1U << literal_decode.table_bits) - 1)); |
| 4667 | |
            if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
| 4669 | return 0; |
| 4670 | bits -= offset_decode.table_bits; |
| 4671 | offset_state = ((val >> bits) |
| 4672 | & ((1U << offset_decode.table_bits) - 1)); |
| 4673 | |
            if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
| 4675 | return 0; |
| 4676 | bits -= match_decode.table_bits; |
| 4677 | match_state = ((val >> bits) |
| 4678 | & ((1U << match_decode.table_bits) - 1)); |
| 4679 | |
| 4680 | seq = 0; |
| 4681 | while (1) |
| 4682 | { |
| 4683 | const struct elf_zstd_fse_baseline_entry *pt; |
| 4684 | uint32_t offset_basebits; |
| 4685 | uint32_t offset_baseline; |
| 4686 | uint32_t offset_bits; |
| 4687 | uint32_t offset_base; |
| 4688 | uint32_t offset; |
| 4689 | uint32_t match_baseline; |
| 4690 | uint32_t match_bits; |
| 4691 | uint32_t match_base; |
| 4692 | uint32_t match; |
| 4693 | uint32_t literal_baseline; |
| 4694 | uint32_t literal_bits; |
| 4695 | uint32_t literal_base; |
| 4696 | uint32_t literal; |
| 4697 | uint32_t need; |
| 4698 | uint32_t add; |
| 4699 | |
| 4700 | pt = &offset_decode.table[offset_state]; |
| 4701 | offset_basebits = pt->basebits; |
| 4702 | offset_baseline = pt->baseline; |
| 4703 | offset_bits = pt->bits; |
| 4704 | offset_base = pt->base; |
| 4705 | |
| 4706 | /* This case can be more than 16 bits, which is all that |
| 4707 | elf_fetch_bits_backward promises. */ |
| 4708 | need = offset_basebits; |
| 4709 | add = 0; |
| 4710 | if (unlikely (need > 16)) |
| 4711 | { |
                    if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
| 4713 | return 0; |
| 4714 | bits -= 16; |
| 4715 | add = (val >> bits) & ((1U << 16) - 1); |
| 4716 | need -= 16; |
| 4717 | add <<= need; |
| 4718 | } |
| 4719 | if (need > 0) |
| 4720 | { |
                    if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
| 4722 | return 0; |
| 4723 | bits -= need; |
| 4724 | add += (val >> bits) & ((1U << need) - 1); |
| 4725 | } |
| 4726 | |
| 4727 | offset = offset_baseline + add; |
| 4728 | |
| 4729 | pt = &match_decode.table[match_state]; |
| 4730 | need = pt->basebits; |
| 4731 | match_baseline = pt->baseline; |
| 4732 | match_bits = pt->bits; |
| 4733 | match_base = pt->base; |
| 4734 | |
| 4735 | add = 0; |
| 4736 | if (need > 0) |
| 4737 | { |
                    if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
| 4739 | return 0; |
| 4740 | bits -= need; |
| 4741 | add = (val >> bits) & ((1U << need) - 1); |
| 4742 | } |
| 4743 | |
| 4744 | match = match_baseline + add; |
| 4745 | |
| 4746 | pt = &literal_decode.table[literal_state]; |
| 4747 | need = pt->basebits; |
| 4748 | literal_baseline = pt->baseline; |
| 4749 | literal_bits = pt->bits; |
| 4750 | literal_base = pt->base; |
| 4751 | |
| 4752 | add = 0; |
| 4753 | if (need > 0) |
| 4754 | { |
                    if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
| 4756 | return 0; |
| 4757 | bits -= need; |
| 4758 | add = (val >> bits) & ((1U << need) - 1); |
| 4759 | } |
| 4760 | |
| 4761 | literal = literal_baseline + add; |
| 4762 | |
| 4763 | /* See the comment in elf_zstd_make_offset_baseline_fse. */ |
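                /* (Offset codes with more than one extra bit encode a new
                   offset directly; smaller values refer to the three most
                   recently used offsets, except that a zero literal length
                   shifts the meaning by one, which is why OFFSET is
                   incremented just below when LITERAL is zero.)  */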
| 4764 | if (offset_basebits > 1) |
| 4765 | { |
| 4766 | repeated_offset3 = repeated_offset2; |
| 4767 | repeated_offset2 = repeated_offset1; |
| 4768 | repeated_offset1 = offset; |
| 4769 | } |
| 4770 | else |
| 4771 | { |
| 4772 | if (unlikely (literal == 0)) |
| 4773 | ++offset; |
| 4774 | switch (offset) |
| 4775 | { |
| 4776 | case 1: |
| 4777 | offset = repeated_offset1; |
| 4778 | break; |
| 4779 | case 2: |
| 4780 | offset = repeated_offset2; |
| 4781 | repeated_offset2 = repeated_offset1; |
| 4782 | repeated_offset1 = offset; |
| 4783 | break; |
| 4784 | case 3: |
| 4785 | offset = repeated_offset3; |
| 4786 | repeated_offset3 = repeated_offset2; |
| 4787 | repeated_offset2 = repeated_offset1; |
| 4788 | repeated_offset1 = offset; |
| 4789 | break; |
| 4790 | case 4: |
| 4791 | offset = repeated_offset1 - 1; |
| 4792 | repeated_offset3 = repeated_offset2; |
| 4793 | repeated_offset2 = repeated_offset1; |
| 4794 | repeated_offset1 = offset; |
| 4795 | break; |
| 4796 | } |
| 4797 | } |
| 4798 | |
| 4799 | ++seq; |
| 4800 | if (seq < seq_count) |
| 4801 | { |
| 4802 | uint32_t v; |
| 4803 | |
| 4804 | /* Update the three states. */ |
| 4805 | |
                    if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
| 4807 | return 0; |
| 4808 | |
| 4809 | need = literal_bits; |
| 4810 | bits -= need; |
| 4811 | v = (val >> bits) & (((uint32_t)1 << need) - 1); |
| 4812 | |
| 4813 | literal_state = literal_base + v; |
| 4814 | |
                    if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
| 4816 | return 0; |
| 4817 | |
| 4818 | need = match_bits; |
| 4819 | bits -= need; |
| 4820 | v = (val >> bits) & (((uint32_t)1 << need) - 1); |
| 4821 | |
| 4822 | match_state = match_base + v; |
| 4823 | |
                    if (!elf_fetch_bits_backward (&pback, pin, &val, &bits))
| 4825 | return 0; |
| 4826 | |
| 4827 | need = offset_bits; |
| 4828 | bits -= need; |
| 4829 | v = (val >> bits) & (((uint32_t)1 << need) - 1); |
| 4830 | |
| 4831 | offset_state = offset_base + v; |
| 4832 | } |
| 4833 | |
| 4834 | /* The next sequence is now in LITERAL, OFFSET, MATCH. */ |
| 4835 | |
| 4836 | /* Copy LITERAL bytes from the literals. */ |
| 4837 | |
| 4838 | if (unlikely ((size_t)(poutend - pout) < literal)) |
| 4839 | { |
| 4840 | elf_uncompress_failed (); |
| 4841 | return 0; |
| 4842 | } |
| 4843 | |
| 4844 | if (unlikely (literal_count < literal)) |
| 4845 | { |
| 4846 | elf_uncompress_failed (); |
| 4847 | return 0; |
| 4848 | } |
| 4849 | |
| 4850 | literal_count -= literal; |
| 4851 | |
| 4852 | /* Often LITERAL is small, so handle small cases quickly. */ |
| 4853 | switch (literal) |
| 4854 | { |
| 4855 | case 8: |
| 4856 | *pout++ = *plit++; |
| 4857 | ATTRIBUTE_FALLTHROUGH; |
| 4858 | case 7: |
| 4859 | *pout++ = *plit++; |
| 4860 | ATTRIBUTE_FALLTHROUGH; |
| 4861 | case 6: |
| 4862 | *pout++ = *plit++; |
| 4863 | ATTRIBUTE_FALLTHROUGH; |
| 4864 | case 5: |
| 4865 | *pout++ = *plit++; |
| 4866 | ATTRIBUTE_FALLTHROUGH; |
| 4867 | case 4: |
| 4868 | *pout++ = *plit++; |
| 4869 | ATTRIBUTE_FALLTHROUGH; |
| 4870 | case 3: |
| 4871 | *pout++ = *plit++; |
| 4872 | ATTRIBUTE_FALLTHROUGH; |
| 4873 | case 2: |
| 4874 | *pout++ = *plit++; |
| 4875 | ATTRIBUTE_FALLTHROUGH; |
| 4876 | case 1: |
| 4877 | *pout++ = *plit++; |
| 4878 | break; |
| 4879 | |
| 4880 | case 0: |
| 4881 | break; |
| 4882 | |
| 4883 | default: |
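                /* (The literals live near the end of the output buffer, so
                   PLIT may be fewer than LITERAL bytes ahead of POUT; in
                   that case copy in chunks of the gap so that each memcpy
                   sees non-overlapping regions.)  */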
| 4884 | if (unlikely ((size_t)(plit - pout) < literal)) |
| 4885 | { |
| 4886 | uint32_t move; |
| 4887 | |
| 4888 | move = plit - pout; |
| 4889 | while (literal > move) |
| 4890 | { |
                            memcpy (pout, plit, move);
| 4892 | pout += move; |
| 4893 | plit += move; |
| 4894 | literal -= move; |
| 4895 | } |
| 4896 | } |
| 4897 | |
                    memcpy (pout, plit, literal);
| 4899 | pout += literal; |
| 4900 | plit += literal; |
| 4901 | } |
| 4902 | |
| 4903 | if (match > 0) |
| 4904 | { |
| 4905 | /* Copy MATCH bytes from the decoded output at OFFSET. */ |
| 4906 | |
| 4907 | if (unlikely ((size_t)(poutend - pout) < match)) |
| 4908 | { |
| 4909 | elf_uncompress_failed (); |
| 4910 | return 0; |
| 4911 | } |
| 4912 | |
| 4913 | if (unlikely ((size_t)(pout - poutstart) < offset)) |
| 4914 | { |
| 4915 | elf_uncompress_failed (); |
| 4916 | return 0; |
| 4917 | } |
| 4918 | |
| 4919 | if (offset >= match) |
| 4920 | { |
                        memcpy (pout, pout - offset, match);
| 4922 | pout += match; |
| 4923 | } |
| 4924 | else |
| 4925 | { |
| 4926 | while (match > 0) |
| 4927 | { |
| 4928 | uint32_t copy; |
| 4929 | |
| 4930 | copy = match < offset ? match : offset; |
                            memcpy (pout, pout - offset, copy);
| 4932 | match -= copy; |
| 4933 | pout += copy; |
| 4934 | } |
| 4935 | } |
| 4936 | } |
| 4937 | |
| 4938 | if (unlikely (seq >= seq_count)) |
| 4939 | { |
| 4940 | /* Copy remaining literals. */ |
| 4941 | if (literal_count > 0 && plit != pout) |
| 4942 | { |
| 4943 | if (unlikely ((size_t)(poutend - pout) |
| 4944 | < literal_count)) |
| 4945 | { |
| 4946 | elf_uncompress_failed (); |
| 4947 | return 0; |
| 4948 | } |
| 4949 | |
| 4950 | if ((size_t)(plit - pout) < literal_count) |
| 4951 | { |
| 4952 | uint32_t move; |
| 4953 | |
| 4954 | move = plit - pout; |
| 4955 | while (literal_count > move) |
| 4956 | { |
                                memcpy (pout, plit, move);
| 4958 | pout += move; |
| 4959 | plit += move; |
| 4960 | literal_count -= move; |
| 4961 | } |
| 4962 | } |
| 4963 | |
                        memcpy (pout, plit, literal_count);
| 4965 | } |
| 4966 | |
| 4967 | pout += literal_count; |
| 4968 | |
| 4969 | break; |
| 4970 | } |
| 4971 | } |
| 4972 | |
| 4973 | pin = pblockend; |
| 4974 | } |
| 4975 | break; |
| 4976 | |
| 4977 | case 3: |
| 4978 | default: |
| 4979 | elf_uncompress_failed (); |
| 4980 | return 0; |
| 4981 | } |
| 4982 | } |
| 4983 | |
| 4984 | if (has_checksum) |
| 4985 | { |
| 4986 | if (unlikely (pin + 4 > pinend)) |
| 4987 | { |
| 4988 | elf_uncompress_failed (); |
| 4989 | return 0; |
| 4990 | } |
| 4991 | |
| 4992 | /* We don't currently verify the checksum. Currently running GNU ld with |
| 4993 | --compress-debug-sections=zstd does not seem to generate a |
| 4994 | checksum. */ |
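|      | 
|      | /* For reference, the optional content checksum is defined as the low
|      | 32 bits of the XXH64 hash of the uncompressed frame content; here we
|      | only skip over it.  */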
| 4995 | |
| 4996 | pin += 4; |
| 4997 | } |
| 4998 | |
| 4999 | if (pin != pinend) |
| 5000 | { |
| 5001 | elf_uncompress_failed (); |
| 5002 | return 0; |
| 5003 | } |
| 5004 | |
| 5005 | return 1; |
| 5006 | } |
| 5007 | |
| 5008 | #define ZDEBUG_TABLE_SIZE \ |
| 5009 | (ZLIB_TABLE_SIZE > ZSTD_TABLE_SIZE ? ZLIB_TABLE_SIZE : ZSTD_TABLE_SIZE) |
| 5010 | |
| 5011 | /* Uncompress the old compressed debug format, the one emitted by |
| 5012 | --compress-debug-sections=zlib-gnu. The compressed data is in |
| 5013 | COMPRESSED / COMPRESSED_SIZE, and the function writes to |
| 5014 | *UNCOMPRESSED / *UNCOMPRESSED_SIZE. ZDEBUG_TABLE is work space to |
| 5015 | hold Huffman tables.  Returns 0 on a fatal error (such as a failed
| 5016 | memory allocation), 1 on successful decompression or if the data
| 5017 | cannot be decompressed; in the latter case we simply carry on.  */
| 5018 | |
| 5019 | static int |
| 5020 | elf_uncompress_zdebug (struct backtrace_state *state, |
| 5021 | const unsigned char *compressed, size_t compressed_size, |
| 5022 | uint16_t *zdebug_table, |
| 5023 | backtrace_error_callback error_callback, void *data, |
| 5024 | unsigned char **uncompressed, size_t *uncompressed_size) |
| 5025 | { |
| 5026 | size_t sz; |
| 5027 | size_t i; |
| 5028 | unsigned char *po; |
| 5029 | |
| 5030 | *uncompressed = NULL; |
| 5031 | *uncompressed_size = 0; |
| 5032 | |
| 5033 | /* The format starts with the four bytes ZLIB, followed by the 8 |
| 5034 | byte length of the uncompressed data in big-endian order, |
| 5035 | followed by a zlib stream. */ |
| 5036 | |
| 5037 | if (compressed_size < 12 || memcmp (compressed, "ZLIB", 4) != 0)
| 5038 | return 1; |
| 5039 | |
| 5040 | sz = 0; |
| 5041 | for (i = 0; i < 8; i++) |
| 5042 | sz = (sz << 8) | compressed[i + 4]; |
| 5043 | |
| 5044 | if (*uncompressed != NULL && *uncompressed_size >= sz) |
| 5045 | po = *uncompressed; |
| 5046 | else |
| 5047 | { |
| 5048 | po = (unsigned char *) backtrace_alloc (state, sz, error_callback, data);
| 5049 | if (po == NULL) |
| 5050 | return 0; |
| 5051 | } |
| 5052 | |
| 5053 | if (!elf_zlib_inflate_and_verify (compressed + 12, compressed_size - 12,
| 5054 | zdebug_table, po, sz))
| 5055 | return 1; |
| 5056 | |
| 5057 | *uncompressed = po; |
| 5058 | *uncompressed_size = sz; |
| 5059 | |
| 5060 | return 1; |
| 5061 | } |
| 5062 | |
| 5063 | /* Uncompress the new compressed debug format, the official standard |
| 5064 | ELF approach emitted by --compress-debug-sections=zlib-gabi. The |
| 5065 | compressed data is in COMPRESSED / COMPRESSED_SIZE, and the |
| 5066 | function writes to *UNCOMPRESSED / *UNCOMPRESSED_SIZE. |
| 5067 | ZDEBUG_TABLE is work space as for elf_uncompress_zdebug.  Returns 0
| 5068 | on a fatal error (such as a failed memory allocation), 1 on
| 5069 | successful decompression or if the data cannot be decompressed; in
| 5070 | the latter case we simply carry on.  */
| 5071 | |
| 5072 | static int |
| 5073 | elf_uncompress_chdr (struct backtrace_state *state, |
| 5074 | const unsigned char *compressed, size_t compressed_size, |
| 5075 | uint16_t *zdebug_table, |
| 5076 | backtrace_error_callback error_callback, void *data, |
| 5077 | unsigned char **uncompressed, size_t *uncompressed_size) |
| 5078 | { |
| 5079 | b_elf_chdr chdr; |
| 5080 | char *alc; |
| 5081 | size_t alc_len; |
| 5082 | unsigned char *po; |
| 5083 | |
| 5084 | *uncompressed = NULL; |
| 5085 | *uncompressed_size = 0; |
| 5086 | |
| 5087 | /* The format starts with an ELF compression header. */ |
| 5088 | if (compressed_size < sizeof (b_elf_chdr)) |
| 5089 | return 1; |
| 5090 | |
| 5091 | /* The lld linker can misalign a compressed section, so we can't safely read |
| 5092 | the fields directly as we can for other ELF sections. See |
| 5093 | https://github.com/ianlancetaylor/libbacktrace/pull/120. */ |
| 5094 | memcpy (&chdr, compressed, sizeof (b_elf_chdr));
| 5095 | |
| 5096 | alc = NULL; |
| 5097 | alc_len = 0; |
| 5098 | if (*uncompressed != NULL && *uncompressed_size >= chdr.ch_size) |
| 5099 | po = *uncompressed; |
| 5100 | else |
| 5101 | { |
| 5102 | alc_len = chdr.ch_size; |
| 5103 | alc = backtrace_alloc (state, alc_len, error_callback, data);
| 5104 | if (alc == NULL) |
| 5105 | return 0; |
| 5106 | po = (unsigned char *) alc; |
| 5107 | } |
| 5108 | |
| 5109 | switch (chdr.ch_type) |
| 5110 | { |
| 5111 | case ELFCOMPRESS_ZLIB: |
| 5112 | if (!elf_zlib_inflate_and_verify (compressed + sizeof (b_elf_chdr),
| 5113 | compressed_size - sizeof (b_elf_chdr),
| 5114 | zdebug_table, po, chdr.ch_size))
| 5115 | goto skip; |
| 5116 | break; |
| 5117 | |
| 5118 | case ELFCOMPRESS_ZSTD: |
| 5119 | if (!elf_zstd_decompress (compressed + sizeof (b_elf_chdr),
| 5120 | compressed_size - sizeof (b_elf_chdr),
| 5121 | (unsigned char *)zdebug_table, po,
| 5122 | chdr.ch_size))
| 5123 | goto skip; |
| 5124 | break; |
| 5125 | |
| 5126 | default: |
| 5127 | /* Unsupported compression algorithm. */ |
| 5128 | goto skip; |
| 5129 | } |
| 5130 | |
| 5131 | *uncompressed = po; |
| 5132 | *uncompressed_size = chdr.ch_size; |
| 5133 | |
| 5134 | return 1; |
| 5135 | |
| 5136 | skip: |
| 5137 | if (alc != NULL && alc_len > 0) |
| 5138 | backtrace_free (state, alc, alc_len, error_callback, data);
| 5139 | return 1; |
| 5140 | } |
| 5141 | |
| 5142 | /* This function is a hook for testing the zlib support. It is only |
| 5143 | used by tests. */ |
| 5144 | |
| 5145 | int |
| 5146 | backtrace_uncompress_zdebug (struct backtrace_state *state, |
| 5147 | const unsigned char *compressed, |
| 5148 | size_t compressed_size, |
| 5149 | backtrace_error_callback error_callback, |
| 5150 | void *data, unsigned char **uncompressed, |
| 5151 | size_t *uncompressed_size) |
| 5152 | { |
| 5153 | uint16_t *zdebug_table; |
| 5154 | int ret; |
| 5155 | |
| 5156 | zdebug_table = ((uint16_t *) backtrace_alloc (state, ZDEBUG_TABLE_SIZE, |
| 5157 | error_callback, data)); |
| 5158 | if (zdebug_table == NULL) |
| 5159 | return 0; |
| 5160 | ret = elf_uncompress_zdebug (state, compressed, compressed_size, |
| 5161 | zdebug_table, error_callback, data, |
| 5162 | uncompressed, uncompressed_size); |
| 5163 | backtrace_free (state, zdebug_table, ZDEBUG_TABLE_SIZE,
| 5164 | error_callback, data); |
| 5165 | return ret; |
| 5166 | } |
| 5167 | |
| 5168 | /* This function is a hook for testing the zstd support. It is only used by |
| 5169 | tests. */ |
| 5170 | |
| 5171 | int |
| 5172 | backtrace_uncompress_zstd (struct backtrace_state *state, |
| 5173 | const unsigned char *compressed, |
| 5174 | size_t compressed_size, |
| 5175 | backtrace_error_callback error_callback, |
| 5176 | void *data, unsigned char *uncompressed, |
| 5177 | size_t uncompressed_size) |
| 5178 | { |
| 5179 | unsigned char *zdebug_table; |
| 5180 | int ret; |
| 5181 | |
| 5182 | zdebug_table = ((unsigned char *) backtrace_alloc (state, ZDEBUG_TABLE_SIZE, |
| 5183 | error_callback, data)); |
| 5184 | if (zdebug_table == NULL) |
| 5185 | return 0; |
| 5186 | ret = elf_zstd_decompress (compressed, compressed_size,
| 5187 | zdebug_table, uncompressed, uncompressed_size);
| 5188 | backtrace_free (state, zdebug_table, ZDEBUG_TABLE_SIZE,
| 5189 | error_callback, data); |
| 5190 | return ret; |
| 5191 | } |
| 5192 | |
| 5193 | /* Number of LZMA states. */ |
| 5194 | #define LZMA_STATES (12) |
| 5195 | |
| 5196 | /* Number of LZMA position states. The pb value of the property byte |
| 5197 | is the number of bits to include in these states, and the maximum |
| 5198 | value of pb is 4. */ |
| 5199 | #define LZMA_POS_STATES (16) |
| 5200 | |
| 5201 | /* Number of LZMA distance states.  These are used for match distances
| 5202 | with a short match length: up to 4 bytes.  */
| 5203 | #define LZMA_DIST_STATES (4) |
| 5204 | |
| 5205 | /* Number of LZMA distance slots. LZMA uses six bits to encode larger |
| 5206 | match lengths, so 1 << 6 possible probabilities. */ |
| 5207 | #define LZMA_DIST_SLOTS (64) |
| 5208 | |
| 5209 | /* LZMA distances 0 to 3 are encoded directly, larger values use a |
| 5210 | probability model. */ |
| 5211 | #define LZMA_DIST_MODEL_START (4) |
| 5212 | |
| 5213 | /* The LZMA probability model ends at 14. */ |
| 5214 | #define LZMA_DIST_MODEL_END (14) |
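|      | 
|      | /* For distance slots from LZMA_DIST_MODEL_START up to (but not
|      | including) LZMA_DIST_MODEL_END the distance is
|      | (2 + (slot & 1)) << ((slot >> 1) - 1) plus low bits decoded with the
|      | special distance probabilities; for larger slots the low four bits
|      | come from the align probabilities and the bits above them are read
|      | directly from the range coder.  */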
| 5215 | |
| 5216 | /* LZMA distance slots for distances less than 127. */ |
| 5217 | #define LZMA_FULL_DISTANCES (128) |
| 5218 | |
| 5219 | /* LZMA uses four alignment bits. */ |
| 5220 | #define LZMA_ALIGN_SIZE (16) |
| 5221 | |
| 5222 | /* LZMA match length is encoded with 4, 5, or 10 bits, some of which |
| 5223 | are already known. */ |
| 5224 | #define LZMA_LEN_LOW_SYMBOLS (8) |
| 5225 | #define LZMA_LEN_MID_SYMBOLS (8) |
| 5226 | #define LZMA_LEN_HIGH_SYMBOLS (256) |
| 5227 | |
| 5228 | /* LZMA literal encoding. */ |
| 5229 | #define LZMA_LITERAL_CODERS_MAX (16) |
| 5230 | #define LZMA_LITERAL_CODER_SIZE (0x300) |
| 5231 | |
| 5232 | /* LZMA is based on a large set of probabilities, each managed |
| 5233 | independently. Each probability is an 11 bit number that we store |
| 5234 | in a uint16_t. We use a single large array of probabilities. */ |
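|      | 
|      | /* Each probability starts at 1 << 10, a 50% chance of a zero bit,
|      | and adapts as bits are decoded: it moves 1/32 of the way toward
|      | 1 << 11 when a zero bit is seen and 1/32 of the way toward 0 when a
|      | one bit is seen (see elf_lzma_bit).  */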
| 5235 | |
| 5236 | /* Lengths of entries in the LZMA probabilities array. The names used |
| 5237 | here are copied from the Linux kernel implementation. */ |
| 5238 | |
| 5239 | #define LZMA_PROB_IS_MATCH_LEN (LZMA_STATES * LZMA_POS_STATES) |
| 5240 | #define LZMA_PROB_IS_REP_LEN LZMA_STATES |
| 5241 | #define LZMA_PROB_IS_REP0_LEN LZMA_STATES |
| 5242 | #define LZMA_PROB_IS_REP1_LEN LZMA_STATES |
| 5243 | #define LZMA_PROB_IS_REP2_LEN LZMA_STATES |
| 5244 | #define LZMA_PROB_IS_REP0_LONG_LEN (LZMA_STATES * LZMA_POS_STATES) |
| 5245 | #define LZMA_PROB_DIST_SLOT_LEN (LZMA_DIST_STATES * LZMA_DIST_SLOTS) |
| 5246 | #define LZMA_PROB_DIST_SPECIAL_LEN (LZMA_FULL_DISTANCES - LZMA_DIST_MODEL_END) |
| 5247 | #define LZMA_PROB_DIST_ALIGN_LEN LZMA_ALIGN_SIZE |
| 5248 | #define LZMA_PROB_MATCH_LEN_CHOICE_LEN 1 |
| 5249 | #define LZMA_PROB_MATCH_LEN_CHOICE2_LEN 1 |
| 5250 | #define LZMA_PROB_MATCH_LEN_LOW_LEN (LZMA_POS_STATES * LZMA_LEN_LOW_SYMBOLS) |
| 5251 | #define LZMA_PROB_MATCH_LEN_MID_LEN (LZMA_POS_STATES * LZMA_LEN_MID_SYMBOLS) |
| 5252 | #define LZMA_PROB_MATCH_LEN_HIGH_LEN LZMA_LEN_HIGH_SYMBOLS |
| 5253 | #define LZMA_PROB_REP_LEN_CHOICE_LEN 1 |
| 5254 | #define LZMA_PROB_REP_LEN_CHOICE2_LEN 1 |
| 5255 | #define LZMA_PROB_REP_LEN_LOW_LEN (LZMA_POS_STATES * LZMA_LEN_LOW_SYMBOLS) |
| 5256 | #define LZMA_PROB_REP_LEN_MID_LEN (LZMA_POS_STATES * LZMA_LEN_MID_SYMBOLS) |
| 5257 | #define LZMA_PROB_REP_LEN_HIGH_LEN LZMA_LEN_HIGH_SYMBOLS |
| 5258 | #define LZMA_PROB_LITERAL_LEN \ |
| 5259 | (LZMA_LITERAL_CODERS_MAX * LZMA_LITERAL_CODER_SIZE) |
| 5260 | |
| 5261 | /* Offsets into the LZMA probabilities array. This is mechanically |
| 5262 | generated from the above lengths. */ |
| 5263 | |
| 5264 | #define LZMA_PROB_IS_MATCH_OFFSET 0 |
| 5265 | #define LZMA_PROB_IS_REP_OFFSET \ |
| 5266 | (LZMA_PROB_IS_MATCH_OFFSET + LZMA_PROB_IS_MATCH_LEN) |
| 5267 | #define LZMA_PROB_IS_REP0_OFFSET \ |
| 5268 | (LZMA_PROB_IS_REP_OFFSET + LZMA_PROB_IS_REP_LEN) |
| 5269 | #define LZMA_PROB_IS_REP1_OFFSET \ |
| 5270 | (LZMA_PROB_IS_REP0_OFFSET + LZMA_PROB_IS_REP0_LEN) |
| 5271 | #define LZMA_PROB_IS_REP2_OFFSET \ |
| 5272 | (LZMA_PROB_IS_REP1_OFFSET + LZMA_PROB_IS_REP1_LEN) |
| 5273 | #define LZMA_PROB_IS_REP0_LONG_OFFSET \ |
| 5274 | (LZMA_PROB_IS_REP2_OFFSET + LZMA_PROB_IS_REP2_LEN) |
| 5275 | #define LZMA_PROB_DIST_SLOT_OFFSET \ |
| 5276 | (LZMA_PROB_IS_REP0_LONG_OFFSET + LZMA_PROB_IS_REP0_LONG_LEN) |
| 5277 | #define LZMA_PROB_DIST_SPECIAL_OFFSET \ |
| 5278 | (LZMA_PROB_DIST_SLOT_OFFSET + LZMA_PROB_DIST_SLOT_LEN) |
| 5279 | #define LZMA_PROB_DIST_ALIGN_OFFSET \ |
| 5280 | (LZMA_PROB_DIST_SPECIAL_OFFSET + LZMA_PROB_DIST_SPECIAL_LEN) |
| 5281 | #define LZMA_PROB_MATCH_LEN_CHOICE_OFFSET \ |
| 5282 | (LZMA_PROB_DIST_ALIGN_OFFSET + LZMA_PROB_DIST_ALIGN_LEN) |
| 5283 | #define LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET \ |
| 5284 | (LZMA_PROB_MATCH_LEN_CHOICE_OFFSET + LZMA_PROB_MATCH_LEN_CHOICE_LEN) |
| 5285 | #define LZMA_PROB_MATCH_LEN_LOW_OFFSET \ |
| 5286 | (LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET + LZMA_PROB_MATCH_LEN_CHOICE2_LEN) |
| 5287 | #define LZMA_PROB_MATCH_LEN_MID_OFFSET \ |
| 5288 | (LZMA_PROB_MATCH_LEN_LOW_OFFSET + LZMA_PROB_MATCH_LEN_LOW_LEN) |
| 5289 | #define LZMA_PROB_MATCH_LEN_HIGH_OFFSET \ |
| 5290 | (LZMA_PROB_MATCH_LEN_MID_OFFSET + LZMA_PROB_MATCH_LEN_MID_LEN) |
| 5291 | #define LZMA_PROB_REP_LEN_CHOICE_OFFSET \ |
| 5292 | (LZMA_PROB_MATCH_LEN_HIGH_OFFSET + LZMA_PROB_MATCH_LEN_HIGH_LEN) |
| 5293 | #define LZMA_PROB_REP_LEN_CHOICE2_OFFSET \ |
| 5294 | (LZMA_PROB_REP_LEN_CHOICE_OFFSET + LZMA_PROB_REP_LEN_CHOICE_LEN) |
| 5295 | #define LZMA_PROB_REP_LEN_LOW_OFFSET \ |
| 5296 | (LZMA_PROB_REP_LEN_CHOICE2_OFFSET + LZMA_PROB_REP_LEN_CHOICE2_LEN) |
| 5297 | #define LZMA_PROB_REP_LEN_MID_OFFSET \ |
| 5298 | (LZMA_PROB_REP_LEN_LOW_OFFSET + LZMA_PROB_REP_LEN_LOW_LEN) |
| 5299 | #define LZMA_PROB_REP_LEN_HIGH_OFFSET \ |
| 5300 | (LZMA_PROB_REP_LEN_MID_OFFSET + LZMA_PROB_REP_LEN_MID_LEN) |
| 5301 | #define LZMA_PROB_LITERAL_OFFSET \ |
| 5302 | (LZMA_PROB_REP_LEN_HIGH_OFFSET + LZMA_PROB_REP_LEN_HIGH_LEN) |
| 5303 | |
| 5304 | #define LZMA_PROB_TOTAL_COUNT \ |
| 5305 | (LZMA_PROB_LITERAL_OFFSET + LZMA_PROB_LITERAL_LEN) |
| 5306 | |
| 5307 | /* Check that the number of LZMA probabilities is the same as the |
| 5308 | Linux kernel implementation. */ |
| 5309 | |
| 5310 | #if LZMA_PROB_TOTAL_COUNT != 1846 + (1 << 4) * 0x300 |
| 5311 | #error Wrong number of LZMA probabilities |
| 5312 | #endif |
| 5313 | |
| 5314 | /* Expressions for the offset in the LZMA probabilities array of a |
| 5315 | specific probability. */ |
| 5316 | |
| 5317 | #define LZMA_IS_MATCH(state, pos) \ |
| 5318 | (LZMA_PROB_IS_MATCH_OFFSET + (state) * LZMA_POS_STATES + (pos)) |
| 5319 | #define LZMA_IS_REP(state) \ |
| 5320 | (LZMA_PROB_IS_REP_OFFSET + (state)) |
| 5321 | #define LZMA_IS_REP0(state) \ |
| 5322 | (LZMA_PROB_IS_REP0_OFFSET + (state)) |
| 5323 | #define LZMA_IS_REP1(state) \ |
| 5324 | (LZMA_PROB_IS_REP1_OFFSET + (state)) |
| 5325 | #define LZMA_IS_REP2(state) \ |
| 5326 | (LZMA_PROB_IS_REP2_OFFSET + (state)) |
| 5327 | #define LZMA_IS_REP0_LONG(state, pos) \ |
| 5328 | (LZMA_PROB_IS_REP0_LONG_OFFSET + (state) * LZMA_POS_STATES + (pos)) |
| 5329 | #define LZMA_DIST_SLOT(dist, slot) \ |
| 5330 | (LZMA_PROB_DIST_SLOT_OFFSET + (dist) * LZMA_DIST_SLOTS + (slot)) |
| 5331 | #define LZMA_DIST_SPECIAL(dist) \ |
| 5332 | (LZMA_PROB_DIST_SPECIAL_OFFSET + (dist)) |
| 5333 | #define LZMA_DIST_ALIGN(dist) \ |
| 5334 | (LZMA_PROB_DIST_ALIGN_OFFSET + (dist)) |
| 5335 | #define LZMA_MATCH_LEN_CHOICE \ |
| 5336 | LZMA_PROB_MATCH_LEN_CHOICE_OFFSET |
| 5337 | #define LZMA_MATCH_LEN_CHOICE2 \ |
| 5338 | LZMA_PROB_MATCH_LEN_CHOICE2_OFFSET |
| 5339 | #define LZMA_MATCH_LEN_LOW(pos, sym) \ |
| 5340 | (LZMA_PROB_MATCH_LEN_LOW_OFFSET + (pos) * LZMA_LEN_LOW_SYMBOLS + (sym)) |
| 5341 | #define LZMA_MATCH_LEN_MID(pos, sym) \ |
| 5342 | (LZMA_PROB_MATCH_LEN_MID_OFFSET + (pos) * LZMA_LEN_MID_SYMBOLS + (sym)) |
| 5343 | #define LZMA_MATCH_LEN_HIGH(sym) \ |
| 5344 | (LZMA_PROB_MATCH_LEN_HIGH_OFFSET + (sym)) |
| 5345 | #define LZMA_REP_LEN_CHOICE \ |
| 5346 | LZMA_PROB_REP_LEN_CHOICE_OFFSET |
| 5347 | #define LZMA_REP_LEN_CHOICE2 \ |
| 5348 | LZMA_PROB_REP_LEN_CHOICE2_OFFSET |
| 5349 | #define LZMA_REP_LEN_LOW(pos, sym) \ |
| 5350 | (LZMA_PROB_REP_LEN_LOW_OFFSET + (pos) * LZMA_LEN_LOW_SYMBOLS + (sym)) |
| 5351 | #define LZMA_REP_LEN_MID(pos, sym) \ |
| 5352 | (LZMA_PROB_REP_LEN_MID_OFFSET + (pos) * LZMA_LEN_MID_SYMBOLS + (sym)) |
| 5353 | #define LZMA_REP_LEN_HIGH(sym) \ |
| 5354 | (LZMA_PROB_REP_LEN_HIGH_OFFSET + (sym)) |
| 5355 | #define LZMA_LITERAL(code, size) \ |
| 5356 | (LZMA_PROB_LITERAL_OFFSET + (code) * LZMA_LITERAL_CODER_SIZE + (size)) |
| 5357 | |
| 5358 | /* Read an LZMA varint from BUF, reading and updating *POFFSET, |
| 5359 | setting *VAL. Returns 0 on error, 1 on success. */ |
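|      | 
|      | /* The varint encoding stores seven bits per byte, least significant
|      | group first, with the high bit of a byte set when another byte
|      | follows.  For example, the bytes 0xc2 0x05 decode to
|      | 0x42 + (0x05 << 7) = 706.  */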
| 5360 | |
| 5361 | static int |
| 5362 | elf_lzma_varint (const unsigned char *compressed, size_t compressed_size, |
| 5363 | size_t *poffset, uint64_t *val) |
| 5364 | { |
| 5365 | size_t off; |
| 5366 | int i; |
| 5367 | uint64_t v; |
| 5368 | unsigned char b; |
| 5369 | |
| 5370 | off = *poffset; |
| 5371 | i = 0; |
| 5372 | v = 0; |
| 5373 | while (1) |
| 5374 | { |
| 5375 | if (unlikely (off >= compressed_size)) |
| 5376 | { |
| 5377 | elf_uncompress_failed (); |
| 5378 | return 0; |
| 5379 | } |
| 5380 | b = compressed[off]; |
| 5381 | v |= (b & 0x7f) << (i * 7); |
| 5382 | ++off; |
| 5383 | if ((b & 0x80) == 0) |
| 5384 | { |
| 5385 | *poffset = off; |
| 5386 | *val = v; |
| 5387 | return 1; |
| 5388 | } |
| 5389 | ++i; |
| 5390 | if (unlikely (i >= 9)) |
| 5391 | { |
| 5392 | elf_uncompress_failed (); |
| 5393 | return 0; |
| 5394 | } |
| 5395 | } |
| 5396 | } |
| 5397 | |
| 5398 | /* Normalize the LZMA range decoder, pulling in an extra input byte if |
| 5399 | needed. */ |
| 5400 | |
| 5401 | static void |
| 5402 | elf_lzma_range_normalize (const unsigned char *compressed, |
| 5403 | size_t compressed_size, size_t *poffset, |
| 5404 | uint32_t *prange, uint32_t *pcode) |
| 5405 | { |
| 5406 | if (*prange < (1U << 24)) |
| 5407 | { |
| 5408 | if (unlikely (*poffset >= compressed_size)) |
| 5409 | { |
| 5410 | /* We assume this will be caught elsewhere. */ |
| 5411 | elf_uncompress_failed (); |
| 5412 | return; |
| 5413 | } |
| 5414 | *prange <<= 8; |
| 5415 | *pcode <<= 8; |
| 5416 | *pcode += compressed[*poffset]; |
| 5417 | ++*poffset; |
| 5418 | } |
| 5419 | } |
| 5420 | |
| 5421 | /* Read and return a single bit from the LZMA stream, reading and |
| 5422 | updating *PROB. Each bit comes from the range coder. */ |
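|      | 
|      | /* The range coder splits the current range in proportion to the
|      | 11-bit probability: a code value below bound = (range >> 11) * prob
|      | decodes as a zero bit and shrinks the range to bound; otherwise the
|      | bit is a one and both the range and the code are reduced by bound.  */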
| 5423 | |
| 5424 | static int |
| 5425 | elf_lzma_bit (const unsigned char *compressed, size_t compressed_size, |
| 5426 | uint16_t *prob, size_t *poffset, uint32_t *prange, |
| 5427 | uint32_t *pcode) |
| 5428 | { |
| 5429 | uint32_t bound; |
| 5430 | |
| 5431 | elf_lzma_range_normalize (compressed, compressed_size, poffset, |
| 5432 | prange, pcode); |
| 5433 | bound = (*prange >> 11) * (uint32_t) *prob; |
| 5434 | if (*pcode < bound) |
| 5435 | { |
| 5436 | *prange = bound; |
| 5437 | *prob += ((1U << 11) - *prob) >> 5; |
| 5438 | return 0; |
| 5439 | } |
| 5440 | else |
| 5441 | { |
| 5442 | *prange -= bound; |
| 5443 | *pcode -= bound; |
| 5444 | *prob -= *prob >> 5; |
| 5445 | return 1; |
| 5446 | } |
| 5447 | } |
| 5448 | |
| 5449 | /* Read an integer of size BITS from the LZMA stream, most significant |
| 5450 | bit first. The bits are predicted using PROBS. */ |
| 5451 | |
| 5452 | static uint32_t |
| 5453 | elf_lzma_integer (const unsigned char *compressed, size_t compressed_size, |
| 5454 | uint16_t *probs, uint32_t bits, size_t *poffset, |
| 5455 | uint32_t *prange, uint32_t *pcode) |
| 5456 | { |
| 5457 | uint32_t sym; |
| 5458 | uint32_t i; |
| 5459 | |
| 5460 | sym = 1; |
| 5461 | for (i = 0; i < bits; i++) |
| 5462 | { |
| 5463 | int bit; |
| 5464 | |
| 5465 | bit = elf_lzma_bit (compressed, compressed_size, probs + sym, poffset,
| 5466 | prange, pcode); |
| 5467 | sym <<= 1; |
| 5468 | sym += bit; |
| 5469 | } |
| 5470 | return sym - (1 << bits); |
| 5471 | } |
| 5472 | |
| 5473 | /* Read an integer of size BITS from the LZMA stream, least |
| 5474 | significant bit first. The bits are predicted using PROBS. */ |
| 5475 | |
| 5476 | static uint32_t |
| 5477 | elf_lzma_reverse_integer (const unsigned char *compressed, |
| 5478 | size_t compressed_size, uint16_t *probs, |
| 5479 | uint32_t bits, size_t *poffset, uint32_t *prange, |
| 5480 | uint32_t *pcode) |
| 5481 | { |
| 5482 | uint32_t sym; |
| 5483 | uint32_t val; |
| 5484 | uint32_t i; |
| 5485 | |
| 5486 | sym = 1; |
| 5487 | val = 0; |
| 5488 | for (i = 0; i < bits; i++) |
| 5489 | { |
| 5490 | int bit; |
| 5491 | |
| 5492 | bit = elf_lzma_bit (compressed, compressed_size, probs + sym, poffset,
| 5493 | prange, pcode); |
| 5494 | sym <<= 1; |
| 5495 | sym += bit; |
| 5496 | val += bit << i; |
| 5497 | } |
| 5498 | return val; |
| 5499 | } |
| 5500 | |
| 5501 | /* Read a length from the LZMA stream. IS_REP picks either LZMA_MATCH |
| 5502 | or LZMA_REP probabilities. */ |
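|      | 
|      | /* The low length coder covers lengths 2 through 9, the mid coder
|      | lengths 10 through 17, and the high coder lengths 18 through 273.  */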
| 5503 | |
| 5504 | static uint32_t |
| 5505 | elf_lzma_len (const unsigned char *compressed, size_t compressed_size, |
| 5506 | uint16_t *probs, int is_rep, unsigned int pos_state, |
| 5507 | size_t *poffset, uint32_t *prange, uint32_t *pcode) |
| 5508 | { |
| 5509 | uint16_t *probs_choice; |
| 5510 | uint16_t *probs_sym; |
| 5511 | uint32_t bits; |
| 5512 | uint32_t len; |
| 5513 | |
| 5514 | probs_choice = probs + (is_rep |
| 5515 | ? LZMA_REP_LEN_CHOICE |
| 5516 | : LZMA_MATCH_LEN_CHOICE); |
| 5517 | if (elf_lzma_bit (compressed, compressed_size, probs_choice, poffset,
| 5518 | prange, pcode)) |
| 5519 | { |
| 5520 | probs_choice = probs + (is_rep |
| 5521 | ? LZMA_REP_LEN_CHOICE2 |
| 5522 | : LZMA_MATCH_LEN_CHOICE2); |
| 5523 | if (elf_lzma_bit (compressed, compressed_size, probs_choice,
| 5524 | poffset, prange, pcode)) |
| 5525 | { |
| 5526 | probs_sym = probs + (is_rep |
| 5527 | ? LZMA_REP_LEN_HIGH (0) |
| 5528 | : LZMA_MATCH_LEN_HIGH (0)); |
| 5529 | bits = 8; |
| 5530 | len = 2 + 8 + 8; |
| 5531 | } |
| 5532 | else |
| 5533 | { |
| 5534 | probs_sym = probs + (is_rep |
| 5535 | ? LZMA_REP_LEN_MID (pos_state, 0) |
| 5536 | : LZMA_MATCH_LEN_MID (pos_state, 0)); |
| 5537 | bits = 3; |
| 5538 | len = 2 + 8; |
| 5539 | } |
| 5540 | } |
| 5541 | else |
| 5542 | { |
| 5543 | probs_sym = probs + (is_rep |
| 5544 | ? LZMA_REP_LEN_LOW (pos_state, 0) |
| 5545 | : LZMA_MATCH_LEN_LOW (pos_state, 0)); |
| 5546 | bits = 3; |
| 5547 | len = 2; |
| 5548 | } |
| 5549 | |
| 5550 | len += elf_lzma_integer (compressed, compressed_size, probs_sym, bits,
| 5551 | poffset, prange, pcode); |
| 5552 | return len; |
| 5553 | } |
| 5554 | |
| 5555 | /* Uncompress one LZMA block from a minidebug file. The compressed |
| 5556 | data is at COMPRESSED + *POFFSET. Update *POFFSET. Store the data |
| 5557 | into the memory at UNCOMPRESSED, size UNCOMPRESSED_SIZE. CHECK is |
| 5558 | the stream flag from the xz header. Return 1 on successful |
| 5559 | decompression. */ |
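|      | 
|      | /* An xz block consists of a block header (a size byte, a flags byte,
|      | optional compressed and uncompressed sizes, the filter list, zero
|      | padding, and a CRC32 of the header), followed by the compressed data
|      | as a sequence of LZMA2 chunks, padding to a four byte boundary, and
|      | the check field selected by the stream flags.  */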
| 5560 | |
| 5561 | static int |
| 5562 | elf_uncompress_lzma_block (const unsigned char *compressed, |
| 5563 | size_t compressed_size, unsigned char check, |
| 5564 | uint16_t *probs, unsigned char *uncompressed, |
| 5565 | size_t uncompressed_size, size_t *poffset) |
| 5566 | { |
| 5567 | size_t off; |
| 5568 | size_t block_header_offset;
| 5569 | size_t block_header_size;
| 5570 | unsigned char block_flags;
| 5571 | uint64_t header_compressed_size;
| 5572 | uint64_t header_uncompressed_size;
| 5573 | unsigned char lzma2_properties; |
| 5574 | size_t crc_offset; |
| 5575 | uint32_t computed_crc; |
| 5576 | uint32_t stream_crc; |
| 5577 | size_t uncompressed_offset; |
| 5578 | size_t dict_start_offset; |
| 5579 | unsigned int lc; |
| 5580 | unsigned int lp; |
| 5581 | unsigned int pb; |
| 5582 | uint32_t range; |
| 5583 | uint32_t code; |
| 5584 | uint32_t lstate; |
| 5585 | uint32_t dist[4]; |
| 5586 | |
| 5587 | off = *poffset; |
| 5588 | block_header_offset = off; |
| 5589 | |
| 5590 | /* Block header size is a single byte. */ |
| 5591 | if (unlikely (off >= compressed_size)) |
| 5592 | { |
| 5593 | elf_uncompress_failed (); |
| 5594 | return 0; |
| 5595 | } |
| 5596 | block_header_size = (compressed[off] + 1) * 4; |
| 5597 | if (unlikely (off + block_header_size > compressed_size)) |
| 5598 | { |
| 5599 | elf_uncompress_failed (); |
| 5600 | return 0; |
| 5601 | } |
| 5602 | |
| 5603 | /* Block flags. */ |
| 5604 | block_flags = compressed[off + 1]; |
| 5605 | if (unlikely ((block_flags & 0x3c) != 0)) |
| 5606 | { |
| 5607 | elf_uncompress_failed (); |
| 5608 | return 0; |
| 5609 | } |
| 5610 | |
| 5611 | off += 2; |
| 5612 | |
| 5613 | /* Optional compressed size. */ |
| 5614 | header_compressed_size = 0; |
| 5615 | if ((block_flags & 0x40) != 0) |
| 5616 | { |
| 5617 | *poffset = off; |
| 5618 | if (!elf_lzma_varint (compressed, compressed_size, poffset, |
| 5619 | &header_compressed_size))
| 5620 | return 0; |
| 5621 | off = *poffset; |
| 5622 | } |
| 5623 | |
| 5624 | /* Optional uncompressed size. */ |
| 5625 | header_uncompressed_size = 0; |
| 5626 | if ((block_flags & 0x80) != 0) |
| 5627 | { |
| 5628 | *poffset = off; |
| 5629 | if (!elf_lzma_varint (compressed, compressed_size, poffset, |
| 5630 | &header_uncompressed_size))
| 5631 | return 0; |
| 5632 | off = *poffset; |
| 5633 | } |
| 5634 | |
| 5635 | /* The recipe for creating a minidebug file is to run the xz program |
| 5636 | with no arguments, so we expect exactly one filter: lzma2. */ |
| 5637 | |
| 5638 | if (unlikely ((block_flags & 0x3) != 0)) |
| 5639 | { |
| 5640 | elf_uncompress_failed (); |
| 5641 | return 0; |
| 5642 | } |
| 5643 | |
| 5644 | if (unlikely (off + 2 >= block_header_offset + block_header_size)) |
| 5645 | { |
| 5646 | elf_uncompress_failed (); |
| 5647 | return 0; |
| 5648 | } |
| 5649 | |
| 5650 | /* The filter ID for LZMA2 is 0x21. */ |
| 5651 | if (unlikely (compressed[off] != 0x21)) |
| 5652 | { |
| 5653 | elf_uncompress_failed (); |
| 5654 | return 0; |
| 5655 | } |
| 5656 | ++off; |
| 5657 | |
| 5658 | /* The size of the filter properties for LZMA2 is 1. */ |
| 5659 | if (unlikely (compressed[off] != 1)) |
| 5660 | { |
| 5661 | elf_uncompress_failed (); |
| 5662 | return 0; |
| 5663 | } |
| 5664 | ++off; |
| 5665 | |
| 5666 | lzma2_properties = compressed[off]; |
| 5667 | ++off; |
| 5668 | |
| 5669 | if (unlikely (lzma2_properties > 40)) |
| 5670 | { |
| 5671 | elf_uncompress_failed (); |
| 5672 | return 0; |
| 5673 | } |
| 5674 | |
| 5675 | /* The properties describe the dictionary size, but we don't care |
| 5676 | what that is. */ |
| 5677 | |
| 5678 | /* Skip to just before CRC, verifying zero bytes in between. */ |
| 5679 | crc_offset = block_header_offset + block_header_size - 4; |
| 5680 | if (unlikely (crc_offset + 4 > compressed_size)) |
| 5681 | { |
| 5682 | elf_uncompress_failed (); |
| 5683 | return 0; |
| 5684 | } |
| 5685 | for (; off < crc_offset; off++) |
| 5686 | { |
| 5687 | if (compressed[off] != 0) |
| 5688 | { |
| 5689 | elf_uncompress_failed (); |
| 5690 | return 0; |
| 5691 | } |
| 5692 | } |
| 5693 | |
| 5694 | /* Block header CRC. */ |
| 5695 | computed_crc = elf_crc32 (0, compressed + block_header_offset,
| 5696 | block_header_size - 4);
| 5697 | stream_crc = ((uint32_t)compressed[off] |
| 5698 | | ((uint32_t)compressed[off + 1] << 8) |
| 5699 | | ((uint32_t)compressed[off + 2] << 16) |
| 5700 | | ((uint32_t)compressed[off + 3] << 24)); |
| 5701 | if (unlikely (computed_crc != stream_crc)) |
| 5702 | { |
| 5703 | elf_uncompress_failed (); |
| 5704 | return 0; |
| 5705 | } |
| 5706 | off += 4; |
| 5707 | |
| 5708 | /* Read a sequence of LZMA2 packets. */ |
| 5709 | |
| 5710 | uncompressed_offset = 0; |
| 5711 | dict_start_offset = 0; |
| 5712 | lc = 0; |
| 5713 | lp = 0; |
| 5714 | pb = 0; |
| 5715 | lstate = 0; |
| 5716 | while (off < compressed_size) |
| 5717 | { |
| 5718 | unsigned char control; |
| 5719 | |
| 5720 | range = 0xffffffff; |
| 5721 | code = 0; |
| 5722 | |
| 5723 | control = compressed[off]; |
| 5724 | ++off; |
| 5725 | if (unlikely (control == 0)) |
| 5726 | { |
| 5727 | /* End of packets. */ |
| 5728 | break; |
| 5729 | } |
| 5730 | |
| 5731 | if (control == 1 || control >= 0xe0) |
| 5732 | { |
| 5733 | /* Reset dictionary to empty. */ |
| 5734 | dict_start_offset = uncompressed_offset; |
| 5735 | } |
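|      | 
|      | /* LZMA2 control bytes: 0 ends the sequence of chunks, 1 and 2
|      | introduce an uncompressed chunk (1 also resets the dictionary), and
|      | values with the high bit set introduce an LZMA chunk whose low five
|      | bits are the high bits of the uncompressed chunk size.  */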
| 5736 | |
| 5737 | if (control < 0x80) |
| 5738 | { |
| 5739 | size_t chunk_size; |
| 5740 | |
| 5741 | /* The only valid values here are 1 or 2. A 1 means to |
| 5742 | reset the dictionary (done above). Then we see an |
| 5743 | uncompressed chunk. */ |
| 5744 | |
| 5745 | if (unlikely (control > 2)) |
| 5746 | { |
| 5747 | elf_uncompress_failed (); |
| 5748 | return 0; |
| 5749 | } |
| 5750 | |
| 5751 | /* An uncompressed chunk is a two byte size followed by |
| 5752 | data. */ |
| 5753 | |
| 5754 | if (unlikely (off + 2 > compressed_size)) |
| 5755 | { |
| 5756 | elf_uncompress_failed (); |
| 5757 | return 0; |
| 5758 | } |
| 5759 | |
| 5760 | chunk_size = compressed[off] << 8; |
| 5761 | chunk_size += compressed[off + 1]; |
| 5762 | ++chunk_size; |
| 5763 | |
| 5764 | off += 2; |
| 5765 | |
| 5766 | if (unlikely (off + chunk_size > compressed_size)) |
| 5767 | { |
| 5768 | elf_uncompress_failed (); |
| 5769 | return 0; |
| 5770 | } |
| 5771 | if (unlikely (uncompressed_offset + chunk_size > uncompressed_size)) |
| 5772 | { |
| 5773 | elf_uncompress_failed (); |
| 5774 | return 0; |
| 5775 | } |
| 5776 | |
| 5777 | memcpy (uncompressed + uncompressed_offset, compressed + off,
| 5778 | chunk_size);
| 5779 | uncompressed_offset += chunk_size; |
| 5780 | off += chunk_size; |
| 5781 | } |
| 5782 | else |
| 5783 | { |
| 5784 | size_t uncompressed_chunk_start; |
| 5785 | size_t uncompressed_chunk_size; |
| 5786 | size_t compressed_chunk_size; |
| 5787 | size_t limit; |
| 5788 | |
| 5789 | /* An LZMA chunk. This starts with an uncompressed size and |
| 5790 | a compressed size. */ |
| 5791 | |
| 5792 | if (unlikely (off + 4 >= compressed_size)) |
| 5793 | { |
| 5794 | elf_uncompress_failed (); |
| 5795 | return 0; |
| 5796 | } |
| 5797 | |
| 5798 | uncompressed_chunk_start = uncompressed_offset; |
| 5799 | |
| 5800 | uncompressed_chunk_size = (control & 0x1f) << 16; |
| 5801 | uncompressed_chunk_size += compressed[off] << 8; |
| 5802 | uncompressed_chunk_size += compressed[off + 1]; |
| 5803 | ++uncompressed_chunk_size; |
| 5804 | |
| 5805 | compressed_chunk_size = compressed[off + 2] << 8; |
| 5806 | compressed_chunk_size += compressed[off + 3]; |
| 5807 | ++compressed_chunk_size; |
| 5808 | |
| 5809 | off += 4; |
| 5810 | |
| 5811 | /* Bit 7 (0x80) is set. |
| 5812 | Bits 6 and 5 (0x40 and 0x20) are as follows: |
| 5813 | 0: don't reset anything |
| 5814 | 1: reset state |
| 5815 | 2: reset state, read properties |
| 5816 | 3: reset state, read properties, reset dictionary (done above) */ |
| 5817 | |
| 5818 | if (control >= 0xc0) |
| 5819 | { |
| 5820 | unsigned char props; |
| 5821 | |
| 5822 | /* Bit 6 is set, read properties. */ |
| 5823 | |
| 5824 | if (unlikely (off >= compressed_size)) |
| 5825 | { |
| 5826 | elf_uncompress_failed (); |
| 5827 | return 0; |
| 5828 | } |
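|      | 
|      | /* The properties byte encodes lc, lp and pb as
|      | (pb * 5 + lp) * 9 + lc; xz's default of lc=3, lp=0, pb=2 gives 93.  */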
| 5829 | props = compressed[off]; |
| 5830 | ++off; |
| 5831 | if (unlikely (props > (4 * 5 + 4) * 9 + 8)) |
| 5832 | { |
| 5833 | elf_uncompress_failed (); |
| 5834 | return 0; |
| 5835 | } |
| 5836 | pb = 0; |
| 5837 | while (props >= 9 * 5) |
| 5838 | { |
| 5839 | props -= 9 * 5; |
| 5840 | ++pb; |
| 5841 | } |
| 5842 | lp = 0; |
| 5843 | while (props > 9) |
| 5844 | { |
| 5845 | props -= 9; |
| 5846 | ++lp; |
| 5847 | } |
| 5848 | lc = props; |
| 5849 | if (unlikely (lc + lp > 4)) |
| 5850 | { |
| 5851 | elf_uncompress_failed (); |
| 5852 | return 0; |
| 5853 | } |
| 5854 | } |
| 5855 | |
| 5856 | if (control >= 0xa0) |
| 5857 | { |
| 5858 | size_t i; |
| 5859 | |
| 5860 | /* Bit 5 or 6 is set, reset LZMA state. */ |
| 5861 | |
| 5862 | lstate = 0; |
| 5863 | memset (&dist, 0, sizeof dist);
| 5864 | for (i = 0; i < LZMA_PROB_TOTAL_COUNT; i++) |
| 5865 | probs[i] = 1 << 10; |
| 5866 | range = 0xffffffff; |
| 5867 | code = 0; |
| 5868 | } |
| 5869 | |
| 5870 | /* Read the range code. */ |
| 5871 | |
| 5872 | if (unlikely (off + 5 > compressed_size)) |
| 5873 | { |
| 5874 | elf_uncompress_failed (); |
| 5875 | return 0; |
| 5876 | } |
| 5877 | |
| 5878 | /* The byte at compressed[off] is ignored for some |
| 5879 | reason. */ |
| 5880 | |
| 5881 | code = (((uint32_t)compressed[off + 1] << 24) |
| 5882 | + ((uint32_t)compressed[off + 2] << 16) |
| 5883 | + ((uint32_t)compressed[off + 3] << 8) |
| 5884 | + (uint32_t)compressed[off + 4]); |
| 5885 | off += 5; |
| 5886 | |
| 5887 | /* This is the main LZMA decode loop. */ |
| 5888 | |
| 5889 | limit = off + compressed_chunk_size; |
| 5890 | *poffset = off; |
| 5891 | while (*poffset < limit) |
| 5892 | { |
| 5893 | unsigned int pos_state; |
| 5894 | |
| 5895 | if (unlikely (uncompressed_offset |
| 5896 | == (uncompressed_chunk_start |
| 5897 | + uncompressed_chunk_size))) |
| 5898 | { |
| 5899 | /* We've decompressed all the expected bytes. */ |
| 5900 | break; |
| 5901 | } |
| 5902 | |
| 5903 | pos_state = ((uncompressed_offset - dict_start_offset) |
| 5904 | & ((1 << pb) - 1)); |
| 5905 | |
| 5906 | if (elf_lzma_bit (compressed, compressed_size,
| 5907 | probs + LZMA_IS_MATCH (lstate, pos_state),
| 5908 | poffset, &range, &code))
| 5909 | {
| 5910 | uint32_t len;
| 5911 | 
| 5912 | if (elf_lzma_bit (compressed, compressed_size,
| 5913 | probs + LZMA_IS_REP (lstate),
| 5914 | poffset, &range, &code))
| 5915 | { |
| 5916 | int short_rep; |
| 5917 | uint32_t next_dist; |
| 5918 | |
| 5919 | /* Repeated match. */ |
| 5920 | |
| 5921 | short_rep = 0; |
| 5922 | if (elf_lzma_bit (compressed, compressed_size,
| 5923 | probs + LZMA_IS_REP0 (lstate),
| 5924 | poffset, &range, &code))
| 5925 | {
| 5926 | if (elf_lzma_bit (compressed, compressed_size,
| 5927 | probs + LZMA_IS_REP1 (lstate),
| 5928 | poffset, &range, &code))
| 5929 | {
| 5930 | if (elf_lzma_bit (compressed, compressed_size,
| 5931 | probs + LZMA_IS_REP2 (lstate),
| 5932 | poffset, &range, &code))
| 5933 | { |
| 5934 | next_dist = dist[3]; |
| 5935 | dist[3] = dist[2]; |
| 5936 | } |
| 5937 | else |
| 5938 | { |
| 5939 | next_dist = dist[2]; |
| 5940 | } |
| 5941 | dist[2] = dist[1]; |
| 5942 | } |
| 5943 | else |
| 5944 | { |
| 5945 | next_dist = dist[1]; |
| 5946 | } |
| 5947 | |
| 5948 | dist[1] = dist[0]; |
| 5949 | dist[0] = next_dist; |
| 5950 | } |
| 5951 | else |
| 5952 | { |
| 5953 | if (!elf_lzma_bit (compressed, compressed_size, |
| 5954 | (probs
| 5955 | + LZMA_IS_REP0_LONG (lstate,
| 5956 | pos_state)),
| 5957 | poffset, &range, &code))
| 5958 | short_rep = 1; |
| 5959 | } |
| 5960 | |
| 5961 | if (lstate < 7) |
| 5962 | lstate = short_rep ? 9 : 8; |
| 5963 | else |
| 5964 | lstate = 11; |
| 5965 | |
| 5966 | if (short_rep) |
| 5967 | len = 1; |
| 5968 | else |
| 5969 | len = elf_lzma_len (compressed, compressed_size, |
| 5970 | probs, 1, pos_state, poffset,
| 5971 | &range, &code);
| 5972 | } |
| 5973 | else |
| 5974 | { |
| 5975 | uint32_t dist_state; |
| 5976 | uint32_t dist_slot; |
| 5977 | uint16_t *probs_dist; |
| 5978 | |
| 5979 | /* Match. */ |
| 5980 | |
| 5981 | if (lstate < 7) |
| 5982 | lstate = 7; |
| 5983 | else |
| 5984 | lstate = 10; |
| 5985 | dist[3] = dist[2]; |
| 5986 | dist[2] = dist[1]; |
| 5987 | dist[1] = dist[0]; |
| 5988 | len = elf_lzma_len (compressed, compressed_size, |
| 5989 | probs, 0, pos_state, poffset,
| 5990 | &range, &code);
| 5991 | |
| 5992 | if (len < 4 + 2) |
| 5993 | dist_state = len - 2; |
| 5994 | else |
| 5995 | dist_state = 3; |
| 5996 | probs_dist = probs + LZMA_DIST_SLOT (dist_state, 0); |
| 5997 | dist_slot = elf_lzma_integer (compressed, |
| 5998 | compressed_size, |
| 5999 | probs_dist, 6,
| 6000 | poffset, &range,
| 6001 | &code);
| 6002 | if (dist_slot < LZMA_DIST_MODEL_START) |
| 6003 | dist[0] = dist_slot; |
| 6004 | else |
| 6005 | { |
| 6006 | uint32_t limit; |
| 6007 | |
| 6008 | limit = (dist_slot >> 1) - 1; |
| 6009 | dist[0] = 2 + (dist_slot & 1); |
| 6010 | if (dist_slot < LZMA_DIST_MODEL_END) |
| 6011 | { |
| 6012 | dist[0] <<= limit; |
| 6013 | probs_dist = (probs |
| 6014 | + LZMA_DIST_SPECIAL(dist[0] |
| 6015 | - dist_slot |
| 6016 | - 1)); |
| 6017 | dist[0] += |
| 6018 | elf_lzma_reverse_integer (compressed, |
| 6019 | compressed_size, |
| 6020 | probs_dist,
| 6021 | limit, poffset,
| 6022 | &range, &code);
| 6023 | } |
| 6024 | else |
| 6025 | { |
| 6026 | uint32_t dist0; |
| 6027 | uint32_t i; |
| 6028 | |
| 6029 | dist0 = dist[0]; |
| 6030 | for (i = 0; i < limit - 4; i++) |
| 6031 | { |
| 6032 | uint32_t mask; |
| 6033 | |
| 6034 | elf_lzma_range_normalize (compressed, |
| 6035 | compressed_size, |
| 6036 | poffset, |
| 6037 | &range, &code);
| 6038 | range >>= 1; |
| 6039 | code -= range; |
| 6040 | mask = -(code >> 31); |
| 6041 | code += range & mask; |
| 6042 | dist0 <<= 1; |
| 6043 | dist0 += mask + 1; |
| 6044 | } |
| 6045 | dist0 <<= 4; |
| 6046 | probs_dist = probs + LZMA_DIST_ALIGN (0); |
| 6047 | dist0 += |
| 6048 | elf_lzma_reverse_integer (compressed, |
| 6049 | compressed_size, |
| 6050 | probs_dist, 4,
| 6051 | poffset,
| 6052 | &range, &code);
| 6053 | dist[0] = dist0; |
| 6054 | } |
| 6055 | } |
| 6056 | } |
| 6057 | |
| 6058 | if (unlikely (uncompressed_offset |
| 6059 | - dict_start_offset < dist[0] + 1)) |
| 6060 | { |
| 6061 | elf_uncompress_failed (); |
| 6062 | return 0; |
| 6063 | } |
| 6064 | if (unlikely (uncompressed_offset + len > uncompressed_size)) |
| 6065 | { |
| 6066 | elf_uncompress_failed (); |
| 6067 | return 0; |
| 6068 | } |
| 6069 | |
| 6070 | if (dist[0] == 0) |
| 6071 | { |
| 6072 | /* A common case, meaning repeat the last |
| 6073 | character LEN times. */ |
| 6074 | memset (uncompressed + uncompressed_offset,
| 6075 | uncompressed[uncompressed_offset - 1],
| 6076 | len);
| 6077 | uncompressed_offset += len; |
| 6078 | } |
| 6079 | else if (dist[0] + 1 >= len) |
| 6080 | { |
| 6081 | memcpy (uncompressed + uncompressed_offset,
| 6082 | uncompressed + uncompressed_offset - dist[0] - 1,
| 6083 | len);
| 6084 | uncompressed_offset += len; |
| 6085 | } |
| 6086 | else |
| 6087 | { |
| 6088 | while (len > 0) |
| 6089 | { |
| 6090 | uint32_t copy; |
| 6091 | |
| 6092 | copy = len < dist[0] + 1 ? len : dist[0] + 1; |
| 6093 | memcpy (uncompressed + uncompressed_offset,
| 6094 | (uncompressed + uncompressed_offset
| 6095 | - dist[0] - 1),
| 6096 | copy);
| 6097 | len -= copy; |
| 6098 | uncompressed_offset += copy; |
| 6099 | } |
| 6100 | } |
| 6101 | } |
| 6102 | else |
| 6103 | { |
| 6104 | unsigned char prev; |
| 6105 | unsigned char low; |
| 6106 | size_t high; |
| 6107 | uint16_t *lit_probs; |
| 6108 | unsigned int sym; |
| 6109 | |
| 6110 | /* Literal value. */ |
| 6111 | |
| 6112 | if (uncompressed_offset > 0) |
| 6113 | prev = uncompressed[uncompressed_offset - 1]; |
| 6114 | else |
| 6115 | prev = 0; |
| 6116 | low = prev >> (8 - lc); |
| 6117 | high = (((uncompressed_offset - dict_start_offset) |
| 6118 | & ((1 << lp) - 1)) |
| 6119 | << lc); |
| 6120 | lit_probs = probs + LZMA_LITERAL (low + high, 0); |
| 6121 | if (lstate < 7) |
| 6122 | sym = elf_lzma_integer (compressed, compressed_size, |
| 6123 | lit_probs, 8, poffset, &range,
| 6124 | &code);
| 6125 | else |
| 6126 | { |
| 6127 | unsigned int match; |
| 6128 | unsigned int bit; |
| 6129 | unsigned int match_bit; |
| 6130 | unsigned int idx; |
| 6131 | |
| 6132 | sym = 1; |
| 6133 | if (uncompressed_offset >= dist[0] + 1) |
| 6134 | match = uncompressed[uncompressed_offset - dist[0] - 1]; |
| 6135 | else |
| 6136 | match = 0; |
| 6137 | match <<= 1; |
| 6138 | bit = 0x100; |
| 6139 | do |
| 6140 | { |
| 6141 | match_bit = match & bit; |
| 6142 | match <<= 1; |
| 6143 | idx = bit + match_bit + sym; |
| 6144 | sym <<= 1; |
| 6145 | if (elf_lzma_bit (compressed, compressed_size, |
| 6146 | lit_probs + idx, poffset,
| 6147 | &range, &code))
| 6148 | { |
| 6149 | ++sym; |
| 6150 | bit &= match_bit; |
| 6151 | } |
| 6152 | else |
| 6153 | { |
| 6154 | bit &= ~ match_bit; |
| 6155 | } |
| 6156 | } |
| 6157 | while (sym < 0x100); |
| 6158 | } |
| 6159 | |
| 6160 | if (unlikely (uncompressed_offset >= uncompressed_size)) |
| 6161 | { |
| 6162 | elf_uncompress_failed (); |
| 6163 | return 0; |
| 6164 | } |
| 6165 | |
| 6166 | uncompressed[uncompressed_offset] = (unsigned char) sym; |
| 6167 | ++uncompressed_offset; |
| 6168 | if (lstate <= 3) |
| 6169 | lstate = 0; |
| 6170 | else if (lstate <= 9) |
| 6171 | lstate -= 3; |
| 6172 | else |
| 6173 | lstate -= 6; |
| 6174 | } |
| 6175 | } |
| 6176 | |
| 6177 | elf_lzma_range_normalize (compressed, compressed_size, poffset, |
| 6178 | &range, &code);
| 6179 | |
| 6180 | off = *poffset; |
| 6181 | } |
| 6182 | } |
| 6183 | |
| 6184 | /* We have reached the end of the block. Pad to four byte |
| 6185 | boundary. */ |
| 6186 | off = (off + 3) &~ (size_t) 3; |
| 6187 | if (unlikely (off > compressed_size)) |
| 6188 | { |
| 6189 | elf_uncompress_failed (); |
| 6190 | return 0; |
| 6191 | } |
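|      | 
|      | /* The check type from the stream flags determines the size of the
|      | check field that follows the block: none (0), CRC32 (1, 4 bytes),
|      | CRC64 (4, 8 bytes), or SHA-256 (10, 32 bytes).  */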
| 6192 | |
| 6193 | switch (check) |
| 6194 | { |
| 6195 | case 0: |
| 6196 | /* No check. */ |
| 6197 | break; |
| 6198 | |
| 6199 | case 1: |
| 6200 | /* CRC32 */ |
| 6201 | if (unlikely (off + 4 > compressed_size)) |
| 6202 | { |
| 6203 | elf_uncompress_failed (); |
| 6204 | return 0; |
| 6205 | } |
| 6206 | computed_crc = elf_crc32 (0, uncompressed, uncompressed_offset);
| 6207 | stream_crc = ((uint32_t)compressed[off] |
| 6208 | | ((uint32_t)compressed[off + 1] << 8) |
| 6209 | | ((uint32_t)compressed[off + 2] << 16) |
| 6210 | | ((uint32_t)compressed[off + 3] << 24)); |
| 6211 | if (computed_crc != stream_crc) |
| 6212 | { |
| 6213 | elf_uncompress_failed (); |
| 6214 | return 0; |
| 6215 | } |
| 6216 | off += 4; |
| 6217 | break; |
| 6218 | |
| 6219 | case 4: |
| 6220 | /* CRC64. We don't bother computing a CRC64 checksum. */ |
| 6221 | if (unlikely (off + 8 > compressed_size)) |
| 6222 | { |
| 6223 | elf_uncompress_failed (); |
| 6224 | return 0; |
| 6225 | } |
| 6226 | off += 8; |
| 6227 | break; |
| 6228 | |
| 6229 | case 10: |
| 6230 | /* SHA. We don't bother computing a SHA checksum. */ |
| 6231 | if (unlikely (off + 32 > compressed_size)) |
| 6232 | { |
| 6233 | elf_uncompress_failed (); |
| 6234 | return 0; |
| 6235 | } |
| 6236 | off += 32; |
| 6237 | break; |
| 6238 | |
| 6239 | default: |
| 6240 | elf_uncompress_failed (); |
| 6241 | return 0; |
| 6242 | } |
| 6243 | |
| 6244 | *poffset = off; |
| 6245 | |
| 6246 | return 1; |
| 6247 | } |
| 6248 | |
| 6249 | /* Uncompress LZMA data found in a minidebug file. The minidebug |
| 6250 | format is described at |
| 6251 | https://sourceware.org/gdb/current/onlinedocs/gdb/MiniDebugInfo.html. |
| 6252 | Returns 0 on error, 1 on successful decompression. For this |
| 6253 | function we return 0 on failure to decompress, as the calling code |
| 6254 | will carry on in that case. */ |
| 6255 | |
| 6256 | static int |
| 6257 | elf_uncompress_lzma (struct backtrace_state *state, |
| 6258 | const unsigned char *compressed, size_t compressed_size, |
| 6259 | backtrace_error_callback error_callback, void *data, |
| 6260 | unsigned char **uncompressed, size_t *uncompressed_size) |
| 6261 | { |
| 6262 | size_t header_size;
| 6263 | size_t footer_size;
| 6264 | unsigned char check; |
| 6265 | uint32_t computed_crc; |
| 6266 | uint32_t stream_crc; |
| 6267 | size_t offset; |
| 6268 | size_t index_size; |
| 6269 | size_t footer_offset;
| 6270 | size_t index_offset; |
| 6271 | uint64_t index_compressed_size; |
| 6272 | uint64_t index_uncompressed_size; |
| 6273 | unsigned char *mem; |
| 6274 | uint16_t *probs; |
| 6275 | size_t compressed_block_size; |
| 6276 | |
| 6277 | /* The format starts with a stream header and ends with a stream |
| 6278 | footer. */ |
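|      | 
|      | /* An xz stream is a 12 byte header (magic bytes, stream flags, and a
|      | CRC32), zero or more blocks, an index describing the blocks, and a
|      | 12 byte footer (a CRC32, the size of the index, the stream flags
|      | again, and the magic bytes "YZ").  */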
| 6279 | header_size = 12; |
| 6280 | footer_size = 12; |
| 6281 | if (unlikely (compressed_size < header_size + footer_size)) |
| 6282 | { |
| 6283 | elf_uncompress_failed (); |
| 6284 | return 0; |
| 6285 | } |
| 6286 | |
| 6287 | /* The stream header starts with a magic string. */ |
| 6288 | if (unlikely (memcmp (compressed, "\375" "7zXZ\0", 6) != 0))
| 6289 | { |
| 6290 | elf_uncompress_failed (); |
| 6291 | return 0; |
| 6292 | } |
| 6293 | |
| 6294 | /* Next come stream flags. The first byte is zero, the second byte |
| 6295 | is the check. */ |
| 6296 | if (unlikely (compressed[6] != 0)) |
| 6297 | { |
| 6298 | elf_uncompress_failed (); |
| 6299 | return 0; |
| 6300 | } |
| 6301 | check = compressed[7]; |
| 6302 | if (unlikely ((check & 0xf8) != 0)) |
| 6303 | { |
| 6304 | elf_uncompress_failed (); |
| 6305 | return 0; |
| 6306 | } |
| 6307 | |
| 6308 | /* Next comes a CRC of the stream flags. */ |
| 6309 | computed_crc = elf_crc32 (0, compressed + 6, 2);
| 6310 | stream_crc = ((uint32_t)compressed[8] |
| 6311 | | ((uint32_t)compressed[9] << 8) |
| 6312 | | ((uint32_t)compressed[10] << 16) |
| 6313 | | ((uint32_t)compressed[11] << 24)); |
| 6314 | if (unlikely (computed_crc != stream_crc)) |
| 6315 | { |
| 6316 | elf_uncompress_failed (); |
| 6317 | return 0; |
| 6318 | } |
| 6319 | |
| 6320 | /* Now that we've parsed the header, parse the footer, so that we |
| 6321 | can get the uncompressed size. */ |
| 6322 | |
| 6323 | /* The footer ends with two magic bytes. */ |
| 6324 | |
| 6325 | offset = compressed_size; |
| 6326 | if (unlikely (memcmp (compressed + offset - 2, "YZ", 2) != 0))
| 6327 | { |
| 6328 | elf_uncompress_failed (); |
| 6329 | return 0; |
| 6330 | } |
| 6331 | offset -= 2; |
| 6332 | |
| 6333 | /* Before that are the stream flags, which should be the same as the |
| 6334 | flags in the header. */ |
| 6335 | if (unlikely (compressed[offset - 2] != 0 |
| 6336 | || compressed[offset - 1] != check)) |
| 6337 | { |
| 6338 | elf_uncompress_failed (); |
| 6339 | return 0; |
| 6340 | } |
| 6341 | offset -= 2; |
| 6342 | |
| 6343 | /* Before that is the size of the index field, which precedes the |
| 6344 | footer. */ |
| 6345 | index_size = ((size_t)compressed[offset - 4] |
| 6346 | | ((size_t)compressed[offset - 3] << 8) |
| 6347 | | ((size_t)compressed[offset - 2] << 16) |
| 6348 | | ((size_t)compressed[offset - 1] << 24)); |
| 6349 | index_size = (index_size + 1) * 4; |
| 6350 | offset -= 4; |
| 6351 | |
| 6352 | /* Before that is a footer CRC. */ |
| 6353 | computed_crc = elf_crc32 (0, compressed + offset, 6);
| 6354 | stream_crc = ((uint32_t)compressed[offset - 4] |
| 6355 | | ((uint32_t)compressed[offset - 3] << 8) |
| 6356 | | ((uint32_t)compressed[offset - 2] << 16) |
| 6357 | | ((uint32_t)compressed[offset - 1] << 24)); |
| 6358 | if (unlikely (computed_crc != stream_crc)) |
| 6359 | { |
| 6360 | elf_uncompress_failed (); |
| 6361 | return 0; |
| 6362 | } |
| 6363 | offset -= 4; |
| 6364 | |
| 6365 | /* The index comes just before the footer. */ |
| 6366 | if (unlikely (offset < index_size + header_size)) |
| 6367 | { |
| 6368 | elf_uncompress_failed (); |
| 6369 | return 0; |
| 6370 | } |
| 6371 | |
| 6372 | footer_offset = offset; |
| 6373 | offset -= index_size; |
| 6374 | index_offset = offset; |
| 6375 | |
| 6376 | /* The index starts with a zero byte. */ |
| 6377 | if (unlikely (compressed[offset] != 0)) |
| 6378 | { |
| 6379 | elf_uncompress_failed (); |
| 6380 | return 0; |
| 6381 | } |
| 6382 | ++offset; |
| 6383 | |
| 6384 | /* Next is the number of blocks. We expect zero blocks for an empty |
| 6385 | stream, and otherwise a single block. */ |
| 6386 | if (unlikely (compressed[offset] == 0)) |
| 6387 | { |
| 6388 | *uncompressed = NULL; |
| 6389 | *uncompressed_size = 0; |
| 6390 | return 1; |
| 6391 | } |
| 6392 | if (unlikely (compressed[offset] != 1)) |
| 6393 | { |
| 6394 | elf_uncompress_failed (); |
| 6395 | return 0; |
| 6396 | } |
| 6397 | ++offset; |
| 6398 | |
| 6399 | /* Next is the compressed size and the uncompressed size. */ |
| 6400 | if (!elf_lzma_varint (compressed, compressed_size, &offset,
| 6401 | &index_compressed_size))
| 6402 | return 0;
| 6403 | if (!elf_lzma_varint (compressed, compressed_size, &offset,
| 6404 | &index_uncompressed_size))
| 6405 | return 0; |
| 6406 | |
| 6407 | /* Pad to a four byte boundary. */ |
| 6408 | offset = (offset + 3) &~ (size_t) 3; |
| 6409 | |
| 6410 | /* Next is a CRC of the index. */ |
| 6411 | computed_crc = elf_crc32 (0, compressed + index_offset,
| 6412 | offset - index_offset);
| 6413 | stream_crc = ((uint32_t)compressed[offset] |
| 6414 | | ((uint32_t)compressed[offset + 1] << 8) |
| 6415 | | ((uint32_t)compressed[offset + 2] << 16) |
| 6416 | | ((uint32_t)compressed[offset + 3] << 24)); |
| 6417 | if (unlikely (computed_crc != stream_crc)) |
| 6418 | { |
| 6419 | elf_uncompress_failed (); |
| 6420 | return 0; |
| 6421 | } |
| 6422 | offset += 4; |
| 6423 | |
| 6424 | /* We should now be back at the footer. */ |
| 6425 | if (unlikely (offset != footer_offset)) |
| 6426 | { |
| 6427 | elf_uncompress_failed (); |
| 6428 | return 0; |
| 6429 | } |
| 6430 | |
| 6431 | /* Allocate space to hold the uncompressed data. If we succeed in |
| 6432 | uncompressing the LZMA data, we never free this memory. */ |
| 6433 | mem = (unsigned char *) backtrace_alloc (state, index_uncompressed_size,
| 6434 | error_callback, data); |
| 6435 | if (unlikely (mem == NULL)) |
| 6436 | return 0; |
| 6437 | *uncompressed = mem; |
| 6438 | *uncompressed_size = index_uncompressed_size; |
| 6439 | |
| 6440 | /* Allocate space for probabilities. */ |
| 6441 | probs = ((uint16_t *) |
| 6442 | backtrace_alloc (state, |
| 6443 | LZMA_PROB_TOTAL_COUNT * sizeof (uint16_t), |
| 6444 | error_callback, data)); |
| 6445 | if (unlikely (probs == NULL)) |
| 6446 | { |
| 6447 | backtrace_free (state, mem, index_uncompressed_size, error_callback,
| 6448 | data); |
| 6449 | return 0; |
| 6450 | } |
| 6451 | |
| 6452 | /* Uncompress the block, which follows the header. */ |
| 6453 | offset = 12; |
| 6454 | if (!elf_uncompress_lzma_block (compressed, compressed_size, check, probs, |
| 6455 | mem, index_uncompressed_size, &offset))
| 6456 | {
| 6457 | backtrace_free (state, mem, index_uncompressed_size, error_callback,
| 6458 | data); |
| 6459 | return 0; |
| 6460 | } |
| 6461 | |
| 6462 | compressed_block_size = offset - 12; |
| 6463 | if (unlikely (compressed_block_size |
| 6464 | != ((index_compressed_size + 3) &~ (size_t) 3))) |
| 6465 | { |
| 6466 | elf_uncompress_failed (); |
| 6467 | backtrace_free (state, mem, index_uncompressed_size, error_callback,
| 6468 | data); |
| 6469 | return 0; |
| 6470 | } |
| 6471 | |
| 6472 | offset = (offset + 3) &~ (size_t) 3; |
| 6473 | if (unlikely (offset != index_offset)) |
| 6474 | { |
| 6475 | elf_uncompress_failed (); |
| 6476 | backtrace_free (state, mem, index_uncompressed_size, error_callback,
| 6477 | data); |
| 6478 | return 0; |
| 6479 | } |
| 6480 | |
| 6481 | return 1; |
| 6482 | } |
| 6483 | |
| 6484 | /* This function is a hook for testing the LZMA support. It is only |
| 6485 | used by tests. */ |
| 6486 | |
| 6487 | int |
| 6488 | backtrace_uncompress_lzma (struct backtrace_state *state, |
| 6489 | const unsigned char *compressed, |
| 6490 | size_t compressed_size, |
| 6491 | backtrace_error_callback error_callback, |
| 6492 | void *data, unsigned char **uncompressed, |
| 6493 | size_t *uncompressed_size) |
| 6494 | { |
| 6495 | return elf_uncompress_lzma (state, compressed, compressed_size, |
| 6496 | error_callback, data, uncompressed, |
| 6497 | uncompressed_size); |
| 6498 | } |
| 6499 | |
| 6500 | /* Add the backtrace data for one ELF file. Returns 1 on success, |
| 6501 | 0 on failure (in both cases descriptor is closed) or -1 if exe |
| 6502 | is non-zero and the ELF file is ET_DYN, which tells the caller that |
| 6503 | elf_add will need to be called on the descriptor again after |
| 6504 | base_address is determined. */ |
| 6505 | |
| 6506 | static int |
| 6507 | elf_add (struct backtrace_state *state, const char *filename, int descriptor, |
| 6508 | const unsigned char *memory, size_t memory_size, |
| 6509 | struct libbacktrace_base_address base_address, |
| 6510 | struct elf_ppc64_opd_data *caller_opd, |
| 6511 | backtrace_error_callback error_callback, void *data, |
| 6512 | fileline *fileline_fn, int *found_sym, int *found_dwarf, |
| 6513 | struct dwarf_data **fileline_entry, int exe, int debuginfo, |
| 6514 | const char *with_buildid_data, uint32_t with_buildid_size) |
| 6515 | { |
| 6516 | struct elf_view ehdr_view; |
| 6517 | b_elf_ehdr ehdr; |
| 6518 | off_t shoff; |
| 6519 | unsigned int shnum; |
| 6520 | unsigned int shstrndx; |
| 6521 | struct elf_view shdrs_view; |
| 6522 | int shdrs_view_valid; |
| 6523 | const b_elf_shdr *shdrs; |
| 6524 | const b_elf_shdr *shstrhdr; |
| 6525 | size_t shstr_size; |
| 6526 | off_t shstr_off; |
| 6527 | struct elf_view names_view; |
| 6528 | int names_view_valid; |
| 6529 | const char *names; |
| 6530 | unsigned int symtab_shndx; |
| 6531 | unsigned int dynsym_shndx; |
| 6532 | unsigned int i; |
| 6533 | struct debug_section_info sections[DEBUG_MAX]; |
| 6534 | struct debug_section_info zsections[DEBUG_MAX]; |
| 6535 | struct elf_view symtab_view; |
| 6536 | int symtab_view_valid; |
| 6537 | struct elf_view strtab_view; |
| 6538 | int strtab_view_valid; |
| 6539 | struct elf_view buildid_view; |
| 6540 | int buildid_view_valid; |
| 6541 | const char *buildid_data; |
| 6542 | uint32_t buildid_size; |
| 6543 | struct elf_view debuglink_view; |
| 6544 | int debuglink_view_valid; |
| 6545 | const char *debuglink_name; |
| 6546 | uint32_t debuglink_crc; |
| 6547 | struct elf_view debugaltlink_view; |
| 6548 | int debugaltlink_view_valid; |
| 6549 | const char *debugaltlink_name; |
| 6550 | const char *debugaltlink_buildid_data; |
| 6551 | uint32_t debugaltlink_buildid_size; |
| 6552 | struct elf_view gnu_debugdata_view; |
| 6553 | int gnu_debugdata_view_valid; |
| 6554 | size_t gnu_debugdata_size; |
| 6555 | unsigned char *gnu_debugdata_uncompressed; |
| 6556 | size_t gnu_debugdata_uncompressed_size; |
| 6557 | off_t min_offset; |
| 6558 | off_t max_offset; |
| 6559 | off_t debug_size; |
| 6560 | struct elf_view debug_view; |
| 6561 | int debug_view_valid; |
| 6562 | unsigned int using_debug_view; |
| 6563 | uint16_t *zdebug_table; |
| 6564 | struct elf_view split_debug_view[DEBUG_MAX]; |
| 6565 | unsigned char split_debug_view_valid[DEBUG_MAX]; |
| 6566 | struct elf_ppc64_opd_data opd_data, *opd; |
| 6567 | int opd_view_valid; |
| 6568 | struct dwarf_sections dwarf_sections; |
| 6569 | |
| 6570 | if (!debuginfo) |
| 6571 | { |
| 6572 | *found_sym = 0; |
| 6573 | *found_dwarf = 0; |
| 6574 | } |
| 6575 | |
| 6576 | shdrs_view_valid = 0; |
| 6577 | names_view_valid = 0; |
| 6578 | symtab_view_valid = 0; |
| 6579 | strtab_view_valid = 0; |
| 6580 | buildid_view_valid = 0; |
| 6581 | buildid_data = NULL; |
| 6582 | buildid_size = 0; |
| 6583 | debuglink_view_valid = 0; |
| 6584 | debuglink_name = NULL; |
| 6585 | debuglink_crc = 0; |
| 6586 | debugaltlink_view_valid = 0; |
| 6587 | debugaltlink_name = NULL; |
| 6588 | debugaltlink_buildid_data = NULL; |
| 6589 | debugaltlink_buildid_size = 0; |
| 6590 | gnu_debugdata_view_valid = 0; |
| 6591 | gnu_debugdata_size = 0; |
| 6592 | debug_view_valid = 0; |
| 6593 | memset (&split_debug_view_valid[0], 0, sizeof split_debug_view_valid);
| 6594 | opd = NULL; |
| 6595 | opd_view_valid = 0; |
| 6596 | |
| 6597 | if (!elf_get_view (state, descriptor, memory, memory_size, 0, sizeof ehdr,
| 6598 | error_callback, data, &ehdr_view))
| 6599 | goto fail; |
| 6600 | |
| 6601 | memcpy (&ehdr, ehdr_view.view.data, sizeof ehdr);
| 6602 | |
| 6603 | elf_release_view (state, &ehdr_view, error_callback, data);
| 6604 | |
| 6605 | if (ehdr.e_ident[EI_MAG0] != ELFMAG0 |
| 6606 | || ehdr.e_ident[EI_MAG1] != ELFMAG1 |
| 6607 | || ehdr.e_ident[EI_MAG2] != ELFMAG2 |
| 6608 | || ehdr.e_ident[EI_MAG3] != ELFMAG3) |
| 6609 | { |
| 6610 | error_callback (data, "executable file is not ELF", 0);
| 6611 | goto fail; |
| 6612 | } |
| 6613 | if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) |
| 6614 | { |
| 6615 | error_callback (data, "executable file is unrecognized ELF version", 0);
| 6616 | goto fail; |
| 6617 | } |
| 6618 | |
| 6619 | #if BACKTRACE_ELF_SIZE == 32 |
| 6620 | #define BACKTRACE_ELFCLASS ELFCLASS32 |
| 6621 | #else |
| 6622 | #define BACKTRACE_ELFCLASS ELFCLASS64 |
| 6623 | #endif |
| 6624 | |
| 6625 | if (ehdr.e_ident[EI_CLASS] != BACKTRACE_ELFCLASS) |
| 6626 | { |
| 6627 | error_callback (data, "executable file is unexpected ELF class" , 0); |
| 6628 | goto fail; |
| 6629 | } |
| 6630 | |
| 6631 | if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB |
| 6632 | && ehdr.e_ident[EI_DATA] != ELFDATA2MSB) |
| 6633 | { |
| 6634 | error_callback (data, "executable file has unknown endianness" , 0); |
| 6635 | goto fail; |
| 6636 | } |
| 6637 | |
| 6638 | /* If the executable is ET_DYN, it is either a PIE, or we are running |
| 6639 | directly a shared library with .interp. We need to wait for |
| 6640 | dl_iterate_phdr in that case to determine the actual base_address. */ |
| 6641 | if (exe && ehdr.e_type == ET_DYN) |
| 6642 | return -1; |
| 6643 | |
| 6644 | shoff = ehdr.e_shoff; |
| 6645 | shnum = ehdr.e_shnum; |
| 6646 | shstrndx = ehdr.e_shstrndx; |
| 6647 | |
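| | /* When a file has SHN_LORESERVE (0xff00) or more sections, e_shnum is |
| | stored as 0 and the real count is in the sh_size field of section |
| | header 0; likewise e_shstrndx is stored as SHN_XINDEX and the real |
| | index is in the sh_link field of section header 0. */ |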
| 6648 | if ((shnum == 0 || shstrndx == SHN_XINDEX) |
| 6649 | && shoff != 0) |
| 6650 | { |
| 6651 | struct elf_view shdr_view; |
| 6652 | const b_elf_shdr *shdr; |
| 6653 | |
| 6654 | if (!elf_get_view (state, descriptor, memory, memory_size, shoff, |
| 6655 | sizeof shdr, error_callback, data, &shdr_view)) |
| 6656 | goto fail; |
| 6657 | |
| 6658 | shdr = (const b_elf_shdr *) shdr_view.view.data; |
| 6659 | |
| 6660 | if (shnum == 0) |
| 6661 | shnum = shdr->sh_size; |
| 6662 | |
| 6663 | if (shstrndx == SHN_XINDEX) |
| 6664 | { |
| 6665 | shstrndx = shdr->sh_link; |
| 6666 | |
| 6667 | /* Versions of the GNU binutils between 2.12 and 2.18 did |
| 6668 | not handle objects with more than SHN_LORESERVE sections |
| 6669 | correctly. All large section indexes were offset by |
| 6670 | 0x100. There is more information at |
| 6671 | http://sourceware.org/bugzilla/show_bug.cgi?id=5900 . |
| 6672 | Fortunately these object files are easy to detect, as the |
| 6673 | GNU binutils always put the section header string table |
| 6674 | near the end of the list of sections. Thus if the |
| 6675 | section header string table index is larger than the |
| 6676 | number of sections, then we know we have to subtract |
| 6677 | 0x100 to get the real section index. */ |
| 6678 | if (shstrndx >= shnum && shstrndx >= SHN_LORESERVE + 0x100) |
| 6679 | shstrndx -= 0x100; |
| 6680 | } |
| 6681 | |
| 6682 | elf_release_view (state, &shdr_view, error_callback, data); |
| 6683 | } |
| 6684 | |
| 6685 | if (shnum == 0 || shstrndx == 0) |
| 6686 | goto fail; |
| 6687 | |
| 6688 | /* To translate PC to file/line when using DWARF, we need to find |
| 6689 | the .debug_info and .debug_line sections. */ |
| 6690 | |
| 6691 | /* Read the section headers, skipping the first one. */ |
| 6692 | |
| 6693 | if (!elf_get_view (state, descriptor, memory, memory_size, |
| 6694 | shoff + sizeof (b_elf_shdr), |
| 6695 | (shnum - 1) * sizeof (b_elf_shdr), |
| 6696 | error_callback, data, &shdrs_view)) |
| 6697 | goto fail; |
| 6698 | shdrs_view_valid = 1; |
| 6699 | shdrs = (const b_elf_shdr *) shdrs_view.view.data; |
| 6700 | |
| 6701 | /* Read the section names. */ |
| 6702 | |
| 6703 | shstrhdr = &shdrs[shstrndx - 1]; |
| 6704 | shstr_size = shstrhdr->sh_size; |
| 6705 | shstr_off = shstrhdr->sh_offset; |
| 6706 | |
| 6707 | if (!elf_get_view (state, descriptor, memory, memory_size, shstr_off, |
| 6708 | shstrhdr->sh_size, error_callback, data, &names_view)) |
| 6709 | goto fail; |
| 6710 | names_view_valid = 1; |
| 6711 | names = (const char *) names_view.view.data; |
| 6712 | |
| 6713 | symtab_shndx = 0; |
| 6714 | dynsym_shndx = 0; |
| 6715 | |
| 6716 | memset (sections, 0, sizeof sections); |
| 6717 | memset (zsections, 0, sizeof zsections); |
| 6718 | |
| 6719 | /* Look for the symbol table. */ |
| 6720 | for (i = 1; i < shnum; ++i) |
| 6721 | { |
| 6722 | const b_elf_shdr *shdr; |
| 6723 | unsigned int sh_name; |
| 6724 | const char *name; |
| 6725 | int j; |
| 6726 | |
| 6727 | shdr = &shdrs[i - 1]; |
| 6728 | |
| 6729 | if (shdr->sh_type == SHT_SYMTAB) |
| 6730 | symtab_shndx = i; |
| 6731 | else if (shdr->sh_type == SHT_DYNSYM) |
| 6732 | dynsym_shndx = i; |
| 6733 | |
| 6734 | sh_name = shdr->sh_name; |
| 6735 | if (sh_name >= shstr_size) |
| 6736 | { |
| 6737 | error_callback (data, "ELF section name out of range" , 0); |
| 6738 | goto fail; |
| 6739 | } |
| 6740 | |
| 6741 | name = names + sh_name; |
| 6742 | |
| 6743 | for (j = 0; j < (int) DEBUG_MAX; ++j) |
| 6744 | { |
| 6745 | if (strcmp (name, dwarf_section_names[j]) == 0) |
| 6746 | { |
| 6747 | sections[j].offset = shdr->sh_offset; |
| 6748 | sections[j].size = shdr->sh_size; |
| 6749 | sections[j].compressed = (shdr->sh_flags & SHF_COMPRESSED) != 0; |
| 6750 | break; |
| 6751 | } |
| 6752 | } |
| 6753 | |
| 6754 | if (name[0] == '.' && name[1] == 'z') |
| 6755 | { |
| 6756 | for (j = 0; j < (int) DEBUG_MAX; ++j) |
| 6757 | { |
| 6758 | if (strcmp (name + 2, dwarf_section_names[j] + 1) == 0) |
| 6759 | { |
| 6760 | zsections[j].offset = shdr->sh_offset; |
| 6761 | zsections[j].size = shdr->sh_size; |
| 6762 | break; |
| 6763 | } |
| 6764 | } |
| 6765 | } |
| 6766 | |
| 6767 | /* Read the build ID if present. This could check for any |
| 6768 | SHT_NOTE section with the right note name and type, but gdb |
| 6769 | looks for a specific section name. */ |
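| | /* An ELF note consists of 4-byte namesz, descsz, and type fields, the |
| | name padded to a 4-byte boundary, and then the descriptor; for |
| | NT_GNU_BUILD_ID the name is "GNU" and the descriptor holds the build |
| | ID bytes, hence the 12 + aligned namesz offset used below. */ |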
| 6770 | if ((!debuginfo || with_buildid_data != NULL) |
| 6771 | && !buildid_view_valid |
| 6772 | && strcmp (s1: name, s2: ".note.gnu.build-id" ) == 0) |
| 6773 | { |
| 6774 | const b_elf_note *note; |
| 6775 | |
| 6776 | if (!elf_get_view (state, descriptor, memory, memory_size, |
| 6777 | shdr->sh_offset, shdr->sh_size, error_callback, |
| 6778 | data, &buildid_view)) |
| 6779 | goto fail; |
| 6780 | |
| 6781 | buildid_view_valid = 1; |
| 6782 | note = (const b_elf_note *) buildid_view.view.data; |
| 6783 | if (note->type == NT_GNU_BUILD_ID |
| 6784 | && note->namesz == 4 |
| 6785 | && strncmp (note->name, "GNU", 4) == 0 |
| 6786 | && shdr->sh_size <= 12 + ((note->namesz + 3) & ~ 3) + note->descsz) |
| 6787 | { |
| 6788 | buildid_data = &note->name[0] + ((note->namesz + 3) & ~ 3); |
| 6789 | buildid_size = note->descsz; |
| 6790 | } |
| 6791 | |
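| | /* If the caller passed in an expected build ID (taken from a |
| | .gnu_debugaltlink reference), require a match before using this |
| | file. */ |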
| 6792 | if (with_buildid_size != 0) |
| 6793 | { |
| 6794 | if (buildid_size != with_buildid_size) |
| 6795 | goto fail; |
| 6796 | |
| 6797 | if (memcmp (buildid_data, with_buildid_data, buildid_size) != 0) |
| 6798 | goto fail; |
| 6799 | } |
| 6800 | } |
| 6801 | |
| 6802 | /* Read the debuglink file if present. */ |
| 6803 | if (!debuginfo |
| 6804 | && !debuglink_view_valid |
| 6805 | && strcmp (s1: name, s2: ".gnu_debuglink" ) == 0) |
| 6806 | { |
| 6807 | const char *debuglink_data; |
| 6808 | size_t crc_offset; |
| 6809 | |
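| | /* A .gnu_debuglink section holds the NUL-terminated name of the |
| | separate debug file, zero-padded to a 4-byte boundary, followed by a |
| | 4-byte CRC32 of that file's contents. */ |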
| 6810 | if (!elf_get_view (state, descriptor, memory, memory_size, |
| 6811 | shdr->sh_offset, shdr->sh_size, error_callback, |
| 6812 | data, &debuglink_view)) |
| 6813 | goto fail; |
| 6814 | |
| 6815 | debuglink_view_valid = 1; |
| 6816 | debuglink_data = (const char *) debuglink_view.view.data; |
| 6817 | crc_offset = strnlen (debuglink_data, shdr->sh_size); |
| 6818 | crc_offset = (crc_offset + 3) & ~3; |
| 6819 | if (crc_offset + 4 <= shdr->sh_size) |
| 6820 | { |
| 6821 | debuglink_name = debuglink_data; |
| 6822 | debuglink_crc = *(const uint32_t*)(debuglink_data + crc_offset); |
| 6823 | } |
| 6824 | } |
| 6825 | |
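| | /* A .gnu_debugaltlink section, written by dwz, holds the name of an |
| | alternate file containing DWARF data shared across files, followed |
| | by that file's build ID. */ |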
| 6826 | if (!debugaltlink_view_valid |
| 6827 | && strcmp (s1: name, s2: ".gnu_debugaltlink" ) == 0) |
| 6828 | { |
| 6829 | const char *debugaltlink_data; |
| 6830 | size_t debugaltlink_name_len; |
| 6831 | |
| 6832 | if (!elf_get_view (state, descriptor, memory, memory_size, |
| 6833 | shdr->sh_offset, shdr->sh_size, error_callback, |
| 6834 | data, &debugaltlink_view)) |
| 6835 | goto fail; |
| 6836 | |
| 6837 | debugaltlink_view_valid = 1; |
| 6838 | debugaltlink_data = (const char *) debugaltlink_view.view.data; |
| 6839 | debugaltlink_name = debugaltlink_data; |
| 6840 | debugaltlink_name_len = strnlen (debugaltlink_data, shdr->sh_size); |
| 6841 | if (debugaltlink_name_len < shdr->sh_size) |
| 6842 | { |
| 6843 | /* Include terminating zero. */ |
| 6844 | debugaltlink_name_len += 1; |
| 6845 | |
| 6846 | debugaltlink_buildid_data |
| 6847 | = debugaltlink_data + debugaltlink_name_len; |
| 6848 | debugaltlink_buildid_size = shdr->sh_size - debugaltlink_name_len; |
| 6849 | } |
| 6850 | } |
| 6851 | |
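| | /* A .gnu_debugdata section ("MiniDebugInfo") holds an xz/LZMA |
| | compressed ELF image whose symbol table covers functions missing |
| | from the dynamic symbol table. */ |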
| 6852 | if (!debuginfo |
| 6853 | && !gnu_debugdata_view_valid |
| 6854 | && strcmp (s1: name, s2: ".gnu_debugdata" ) == 0) |
| 6855 | { |
| 6856 | if (!elf_get_view (state, descriptor, memory, memory_size, |
| 6857 | shdr->sh_offset, shdr->sh_size, error_callback, |
| 6858 | data, &gnu_debugdata_view)) |
| 6859 | goto fail; |
| 6860 | |
| 6861 | gnu_debugdata_size = shdr->sh_size; |
| 6862 | gnu_debugdata_view_valid = 1; |
| 6863 | } |
| 6864 | |
| 6865 | /* Read the .opd section on PowerPC64 ELFv1. */ |
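| | /* Under that ABI a function symbol's value is the address of a function |
| | descriptor in .opd whose first word is the real code address, so the |
| | symbol code needs this section to resolve addresses. */ |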
| 6866 | if (ehdr.e_machine == EM_PPC64 |
| 6867 | && (ehdr.e_flags & EF_PPC64_ABI) < 2 |
| 6868 | && shdr->sh_type == SHT_PROGBITS |
| 6869 | && strcmp (s1: name, s2: ".opd" ) == 0) |
| 6870 | { |
| 6871 | if (!elf_get_view (state, descriptor, memory, memory_size, |
| 6872 | shdr->sh_offset, shdr->sh_size, error_callback, |
| 6873 | data, &opd_data.view)) |
| 6874 | goto fail; |
| 6875 | |
| 6876 | opd = &opd_data; |
| 6877 | opd->addr = shdr->sh_addr; |
| 6878 | opd->data = (const char *) opd_data.view.view.data; |
| 6879 | opd->size = shdr->sh_size; |
| 6880 | opd_view_valid = 1; |
| 6881 | } |
| 6882 | } |
| 6883 | |
| 6884 | /* A debuginfo file may not have a useful .opd section, but we can use the |
| 6885 | one from the original executable. */ |
| 6886 | if (opd == NULL) |
| 6887 | opd = caller_opd; |
| 6888 | |
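| | /* Prefer the full .symtab; fall back to .dynsym, which is all that is |
| | left in a stripped binary. */ |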
| 6889 | if (symtab_shndx == 0) |
| 6890 | symtab_shndx = dynsym_shndx; |
| 6891 | if (symtab_shndx != 0) |
| 6892 | { |
| 6893 | const b_elf_shdr *symtab_shdr; |
| 6894 | unsigned int strtab_shndx; |
| 6895 | const b_elf_shdr *strtab_shdr; |
| 6896 | struct elf_syminfo_data *sdata; |
| 6897 | |
| 6898 | symtab_shdr = &shdrs[symtab_shndx - 1]; |
| 6899 | strtab_shndx = symtab_shdr->sh_link; |
| 6900 | if (strtab_shndx >= shnum) |
| 6901 | { |
| 6902 | error_callback (data, |
| 6903 | "ELF symbol table strtab link out of range" , 0); |
| 6904 | goto fail; |
| 6905 | } |
| 6906 | strtab_shdr = &shdrs[strtab_shndx - 1]; |
| 6907 | |
| 6908 | if (!elf_get_view (state, descriptor, memory, memory_size, |
| 6909 | symtab_shdr->sh_offset, symtab_shdr->sh_size, |
| 6910 | error_callback, data, &symtab_view)) |
| 6911 | goto fail; |
| 6912 | symtab_view_valid = 1; |
| 6913 | |
| 6914 | if (!elf_get_view (state, descriptor, memory, memory_size, |
| 6915 | strtab_shdr->sh_offset, strtab_shdr->sh_size, |
| 6916 | error_callback, data, &strtab_view)) |
| 6917 | goto fail; |
| 6918 | strtab_view_valid = 1; |
| 6919 | |
| 6920 | sdata = ((struct elf_syminfo_data *) |
| 6921 | backtrace_alloc (state, sizeof *sdata, error_callback, data)); |
| 6922 | if (sdata == NULL) |
| 6923 | goto fail; |
| 6924 | |
| 6925 | if (!elf_initialize_syminfo (state, base_address, |
| 6926 | symtab_view.view.data, symtab_shdr->sh_size, |
| 6927 | strtab_view.view.data, strtab_shdr->sh_size, |
| 6928 | error_callback, data, sdata, opd)) |
| 6929 | { |
| 6930 | backtrace_free (state, sdata, sizeof *sdata, error_callback, data); |
| 6931 | goto fail; |
| 6932 | } |
| 6933 | |
| 6934 | /* We no longer need the symbol table, but we hold on to the |
| 6935 | string table permanently. */ |
| 6936 | elf_release_view (state, &symtab_view, error_callback, data); |
| 6937 | symtab_view_valid = 0; |
| 6938 | strtab_view_valid = 0; |
| 6939 | |
| 6940 | *found_sym = 1; |
| 6941 | |
| 6942 | elf_add_syminfo_data (state, sdata); |
| 6943 | } |
| 6944 | |
| 6945 | elf_release_view (state, &shdrs_view, error_callback, data); |
| 6946 | shdrs_view_valid = 0; |
| 6947 | elf_release_view (state, &names_view, error_callback, data); |
| 6948 | names_view_valid = 0; |
| 6949 | |
| 6950 | /* If the debug info is in a separate file, read that one instead. */ |
| 6951 | |
| 6952 | if (buildid_data != NULL) |
| 6953 | { |
| 6954 | int d; |
| 6955 | |
| 6956 | d = elf_open_debugfile_by_buildid (state, buildid_data, buildid_size, |
| 6957 | error_callback, data); |
| 6958 | if (d >= 0) |
| 6959 | { |
| 6960 | int ret; |
| 6961 | |
| 6962 | elf_release_view (state, &buildid_view, error_callback, data); |
| 6963 | if (debuglink_view_valid) |
| 6964 | elf_release_view (state, &debuglink_view, error_callback, data); |
| 6965 | if (debugaltlink_view_valid) |
| 6966 | elf_release_view (state, &debugaltlink_view, error_callback, data); |
| 6967 | ret = elf_add (state, "", d, NULL, 0, base_address, opd, |
| 6968 | error_callback, data, fileline_fn, found_sym, |
| 6969 | found_dwarf, NULL, 0, 1, NULL, 0); |
| 6970 | if (ret < 0) |
| 6971 | backtrace_close (d, error_callback, data); |
| 6972 | else if (descriptor >= 0) |
| 6973 | backtrace_close (descriptor, error_callback, data); |
| 6974 | return ret; |
| 6975 | } |
| 6976 | } |
| 6977 | |
| 6978 | if (buildid_view_valid) |
| 6979 | { |
| 6980 | elf_release_view (state, &buildid_view, error_callback, data); |
| 6981 | buildid_view_valid = 0; |
| 6982 | } |
| 6983 | |
| 6984 | if (debuglink_name != NULL) |
| 6985 | { |
| 6986 | int d; |
| 6987 | |
| 6988 | d = elf_open_debugfile_by_debuglink (state, filename, debuglink_name, |
| 6989 | debuglink_crc, error_callback, |
| 6990 | data); |
| 6991 | if (d >= 0) |
| 6992 | { |
| 6993 | int ret; |
| 6994 | |
| 6995 | elf_release_view (state, &debuglink_view, error_callback, data); |
| 6996 | if (debugaltlink_view_valid) |
| 6997 | elf_release_view (state, &debugaltlink_view, error_callback, data); |
| 6998 | ret = elf_add (state, "", d, NULL, 0, base_address, opd, |
| 6999 | error_callback, data, fileline_fn, found_sym, |
| 7000 | found_dwarf, NULL, 0, 1, NULL, 0); |
| 7001 | if (ret < 0) |
| 7002 | backtrace_close (d, error_callback, data); |
| 7003 | else if (descriptor >= 0) |
| 7004 | backtrace_close (descriptor, error_callback, data); |
| 7005 | return ret; |
| 7006 | } |
| 7007 | } |
| 7008 | |
| 7009 | if (debuglink_view_valid) |
| 7010 | { |
| 7011 | elf_release_view (state, &debuglink_view, error_callback, data); |
| 7012 | debuglink_view_valid = 0; |
| 7013 | } |
| 7014 | |
| 7015 | struct dwarf_data *fileline_altlink = NULL; |
| 7016 | if (debugaltlink_name != NULL) |
| 7017 | { |
| 7018 | int d; |
| 7019 | |
| 7020 | d = elf_open_debugfile_by_debuglink (state, filename, debugaltlink_name, |
| 7021 | 0, error_callback, data); |
| 7022 | if (d >= 0) |
| 7023 | { |
| 7024 | int ret; |
| 7025 | |
| 7026 | ret = elf_add (state, filename, d, NULL, 0, base_address, opd, |
| 7027 | error_callback, data, fileline_fn, found_sym, |
| 7028 | found_dwarf, &fileline_altlink, 0, 1, |
| 7029 | debugaltlink_buildid_data, debugaltlink_buildid_size); |
| 7030 | elf_release_view (state, &debugaltlink_view, error_callback, data); |
| 7031 | debugaltlink_view_valid = 0; |
| 7032 | if (ret < 0) |
| 7033 | { |
| 7034 | backtrace_close (d, error_callback, data); |
| 7035 | return ret; |
| 7036 | } |
| 7037 | } |
| 7038 | } |
| 7039 | |
| 7040 | if (debugaltlink_view_valid) |
| 7041 | { |
| 7042 | elf_release_view (state, &debugaltlink_view, error_callback, data); |
| 7043 | debugaltlink_view_valid = 0; |
| 7044 | } |
| 7045 | |
| 7046 | if (gnu_debugdata_view_valid) |
| 7047 | { |
| 7048 | int ret; |
| 7049 | |
| 7050 | ret = elf_uncompress_lzma (state, |
| 7051 | ((const unsigned char *) |
| 7052 | gnu_debugdata_view.view.data), |
| 7053 | gnu_debugdata_size, error_callback, data, |
| 7054 | &gnu_debugdata_uncompressed, |
| 7055 | &gnu_debugdata_uncompressed_size); |
| 7056 | |
| 7057 | elf_release_view (state, &gnu_debugdata_view, error_callback, data); |
| 7058 | gnu_debugdata_view_valid = 0; |
| 7059 | |
| 7060 | if (ret) |
| 7061 | { |
| 7062 | ret = elf_add (state, filename, -1, gnu_debugdata_uncompressed, |
| 7063 | gnu_debugdata_uncompressed_size, base_address, opd, |
| 7064 | error_callback, data, fileline_fn, found_sym, |
| 7065 | found_dwarf, NULL, 0, 0, NULL, 0); |
| 7066 | if (ret >= 0 && descriptor >= 0) |
| 7067 | backtrace_close (descriptor, error_callback, data); |
| 7068 | return ret; |
| 7069 | } |
| 7070 | } |
| 7071 | |
| 7072 | if (opd_view_valid) |
| 7073 | { |
| 7074 | elf_release_view (state, &opd->view, error_callback, data); |
| 7075 | opd_view_valid = 0; |
| 7076 | opd = NULL; |
| 7077 | } |
| 7078 | |
| 7079 | /* Read all the debug sections in a single view, since they are |
| 7080 | probably adjacent in the file. If any of sections are |
| 7081 | uncompressed, we never release this view. */ |
| 7082 | |
| 7083 | min_offset = 0; |
| 7084 | max_offset = 0; |
| 7085 | debug_size = 0; |
| 7086 | for (i = 0; i < (int) DEBUG_MAX; ++i) |
| 7087 | { |
| 7088 | off_t end; |
| 7089 | |
| 7090 | if (sections[i].size != 0) |
| 7091 | { |
| 7092 | if (min_offset == 0 || sections[i].offset < min_offset) |
| 7093 | min_offset = sections[i].offset; |
| 7094 | end = sections[i].offset + sections[i].size; |
| 7095 | if (end > max_offset) |
| 7096 | max_offset = end; |
| 7097 | debug_size += sections[i].size; |
| 7098 | } |
| 7099 | if (zsections[i].size != 0) |
| 7100 | { |
| 7101 | if (min_offset == 0 || zsections[i].offset < min_offset) |
| 7102 | min_offset = zsections[i].offset; |
| 7103 | end = zsections[i].offset + zsections[i].size; |
| 7104 | if (end > max_offset) |
| 7105 | max_offset = end; |
| 7106 | debug_size += zsections[i].size; |
| 7107 | } |
| 7108 | } |
| 7109 | if (min_offset == 0 || max_offset == 0) |
| 7110 | { |
| 7111 | if (descriptor >= 0) |
| 7112 | { |
| 7113 | if (!backtrace_close (descriptor, error_callback, data)) |
| 7114 | goto fail; |
| 7115 | } |
| 7116 | return 1; |
| 7117 | } |
| 7118 | |
| 7119 | /* If the total debug section size is large, assume that there are |
| 7120 | gaps between the sections, and read them individually. */ |
| 7121 | |
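| | /* "Large" here means the span from the first debug section to the last |
| | is at least 512MB (0x20000000) and also at least 64K (0x10000) larger |
| | than the sum of the section sizes. */ |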
| 7122 | if (max_offset - min_offset < 0x20000000 |
| 7123 | || max_offset - min_offset < debug_size + 0x10000) |
| 7124 | { |
| 7125 | if (!elf_get_view (state, descriptor, memory, memory_size, min_offset, |
| 7126 | max_offset - min_offset, error_callback, data, |
| 7127 | &debug_view)) |
| 7128 | goto fail; |
| 7129 | debug_view_valid = 1; |
| 7130 | } |
| 7131 | else |
| 7132 | { |
| 7133 | memset (&split_debug_view[0], 0, sizeof split_debug_view); |
| 7134 | for (i = 0; i < (int) DEBUG_MAX; ++i) |
| 7135 | { |
| 7136 | struct debug_section_info *dsec; |
| 7137 | |
| 7138 | if (sections[i].size != 0) |
| 7139 | dsec = &sections[i]; |
| 7140 | else if (zsections[i].size != 0) |
| 7141 | dsec = &zsections[i]; |
| 7142 | else |
| 7143 | continue; |
| 7144 | |
| 7145 | if (!elf_get_view (state, descriptor, memory, memory_size, |
| 7146 | dsec->offset, dsec->size, error_callback, data, |
| 7147 | &split_debug_view[i])) |
| 7148 | goto fail; |
| 7149 | split_debug_view_valid[i] = 1; |
| 7150 | |
| 7151 | if (sections[i].size != 0) |
| 7152 | sections[i].data = ((const unsigned char *) |
| 7153 | split_debug_view[i].view.data); |
| 7154 | else |
| 7155 | zsections[i].data = ((const unsigned char *) |
| 7156 | split_debug_view[i].view.data); |
| 7157 | } |
| 7158 | } |
| 7159 | |
| 7160 | /* We've read all we need from the executable. */ |
| 7161 | if (descriptor >= 0) |
| 7162 | { |
| 7163 | if (!backtrace_close (descriptor, error_callback, data)) |
| 7164 | goto fail; |
| 7165 | descriptor = -1; |
| 7166 | } |
| 7167 | |
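| | /* using_debug_view counts how many debug sections still point into |
| | debug_view; when a compressed section is later replaced by newly |
| | allocated uncompressed data the count drops, and the view is released |
| | once nothing refers to it. */ |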
| 7168 | using_debug_view = 0; |
| 7169 | if (debug_view_valid) |
| 7170 | { |
| 7171 | for (i = 0; i < (int) DEBUG_MAX; ++i) |
| 7172 | { |
| 7173 | if (sections[i].size == 0) |
| 7174 | sections[i].data = NULL; |
| 7175 | else |
| 7176 | { |
| 7177 | sections[i].data = ((const unsigned char *) debug_view.view.data |
| 7178 | + (sections[i].offset - min_offset)); |
| 7179 | ++using_debug_view; |
| 7180 | } |
| 7181 | |
| 7182 | if (zsections[i].size == 0) |
| 7183 | zsections[i].data = NULL; |
| 7184 | else |
| 7185 | zsections[i].data = ((const unsigned char *) debug_view.view.data |
| 7186 | + (zsections[i].offset - min_offset)); |
| 7187 | } |
| 7188 | } |
| 7189 | |
| 7190 | /* Uncompress the old format (--compress-debug-sections=zlib-gnu). */ |
| 7191 | |
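| | /* A .zdebug_* section starts with the 4-byte magic "ZLIB" and an 8-byte |
| | big-endian uncompressed size, followed by a zlib stream. */ |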
| 7192 | zdebug_table = NULL; |
| 7193 | for (i = 0; i < (int) DEBUG_MAX; ++i) |
| 7194 | { |
| 7195 | if (sections[i].size == 0 && zsections[i].size > 0) |
| 7196 | { |
| 7197 | unsigned char *uncompressed_data; |
| 7198 | size_t uncompressed_size; |
| 7199 | |
| 7200 | if (zdebug_table == NULL) |
| 7201 | { |
| 7202 | zdebug_table = ((uint16_t *) |
| 7203 | backtrace_alloc (state, ZLIB_TABLE_SIZE, |
| 7204 | error_callback, data)); |
| 7205 | if (zdebug_table == NULL) |
| 7206 | goto fail; |
| 7207 | } |
| 7208 | |
| 7209 | uncompressed_data = NULL; |
| 7210 | uncompressed_size = 0; |
| 7211 | if (!elf_uncompress_zdebug (state, zsections[i].data, |
| 7212 | zsections[i].size, zdebug_table, |
| 7213 | error_callback, data, |
| 7214 | &uncompressed_data, &uncompressed_size)) |
| 7215 | goto fail; |
| 7216 | sections[i].data = uncompressed_data; |
| 7217 | sections[i].size = uncompressed_size; |
| 7218 | sections[i].compressed = 0; |
| 7219 | |
| 7220 | if (split_debug_view_valid[i]) |
| 7221 | { |
| 7222 | elf_release_view (state, &split_debug_view[i], |
| 7223 | error_callback, data); |
| 7224 | split_debug_view_valid[i] = 0; |
| 7225 | } |
| 7226 | } |
| 7227 | } |
| 7228 | |
| 7229 | if (zdebug_table != NULL) |
| 7230 | { |
| 7231 | backtrace_free (state, zdebug_table, ZLIB_TABLE_SIZE, |
| 7232 | error_callback, data); |
| 7233 | zdebug_table = NULL; |
| 7234 | } |
| 7235 | |
| 7236 | /* Uncompress the official ELF format |
| 7237 | (--compress-debug-sections=zlib-gabi, --compress-debug-sections=zstd). */ |
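| | /* With SHF_COMPRESSED each section starts with an ELF compression |
| | header giving the algorithm (zlib or zstd) and the uncompressed size, |
| | followed by the compressed data. */ |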
| 7238 | for (i = 0; i < (int) DEBUG_MAX; ++i) |
| 7239 | { |
| 7240 | unsigned char *uncompressed_data; |
| 7241 | size_t uncompressed_size; |
| 7242 | |
| 7243 | if (sections[i].size == 0 || !sections[i].compressed) |
| 7244 | continue; |
| 7245 | |
| 7246 | if (zdebug_table == NULL) |
| 7247 | { |
| 7248 | zdebug_table = ((uint16_t *) |
| 7249 | backtrace_alloc (state, ZDEBUG_TABLE_SIZE, |
| 7250 | error_callback, data)); |
| 7251 | if (zdebug_table == NULL) |
| 7252 | goto fail; |
| 7253 | } |
| 7254 | |
| 7255 | uncompressed_data = NULL; |
| 7256 | uncompressed_size = 0; |
| 7257 | if (!elf_uncompress_chdr (state, sections[i].data, sections[i].size, |
| 7258 | zdebug_table, error_callback, data, |
| 7259 | &uncompressed_data, &uncompressed_size)) |
| 7260 | goto fail; |
| 7261 | sections[i].data = uncompressed_data; |
| 7262 | sections[i].size = uncompressed_size; |
| 7263 | sections[i].compressed = 0; |
| 7264 | |
| 7265 | if (debug_view_valid) |
| 7266 | --using_debug_view; |
| 7267 | else if (split_debug_view_valid[i]) |
| 7268 | { |
| 7269 | elf_release_view (state, &split_debug_view[i], error_callback, data); |
| 7270 | split_debug_view_valid[i] = 0; |
| 7271 | } |
| 7272 | } |
| 7273 | |
| 7274 | if (zdebug_table != NULL) |
| 7275 | backtrace_free (state, zdebug_table, ZDEBUG_TABLE_SIZE, |
| 7276 | error_callback, data); |
| 7277 | |
| 7278 | if (debug_view_valid && using_debug_view == 0) |
| 7279 | { |
| 7280 | elf_release_view (state, &debug_view, error_callback, data); |
| 7281 | debug_view_valid = 0; |
| 7282 | } |
| 7283 | |
| 7284 | for (i = 0; i < (int) DEBUG_MAX; ++i) |
| 7285 | { |
| 7286 | dwarf_sections.data[i] = sections[i].data; |
| 7287 | dwarf_sections.size[i] = sections[i].size; |
| 7288 | } |
| 7289 | |
| 7290 | if (!backtrace_dwarf_add (state, base_address, &dwarf_sections, |
| 7291 | ehdr.e_ident[EI_DATA] == ELFDATA2MSB, |
| 7292 | fileline_altlink, |
| 7293 | error_callback, data, fileline_fn, |
| 7294 | fileline_entry)) |
| 7295 | goto fail; |
| 7296 | |
| 7297 | *found_dwarf = 1; |
| 7298 | |
| 7299 | return 1; |
| 7300 | |
| 7301 | fail: |
| 7302 | if (shdrs_view_valid) |
| 7303 | elf_release_view (state, &shdrs_view, error_callback, data); |
| 7304 | if (names_view_valid) |
| 7305 | elf_release_view (state, &names_view, error_callback, data); |
| 7306 | if (symtab_view_valid) |
| 7307 | elf_release_view (state, &symtab_view, error_callback, data); |
| 7308 | if (strtab_view_valid) |
| 7309 | elf_release_view (state, &strtab_view, error_callback, data); |
| 7310 | if (debuglink_view_valid) |
| 7311 | elf_release_view (state, &debuglink_view, error_callback, data); |
| 7312 | if (debugaltlink_view_valid) |
| 7313 | elf_release_view (state, &debugaltlink_view, error_callback, data); |
| 7314 | if (gnu_debugdata_view_valid) |
| 7315 | elf_release_view (state, &gnu_debugdata_view, error_callback, data); |
| 7316 | if (buildid_view_valid) |
| 7317 | elf_release_view (state, &buildid_view, error_callback, data); |
| 7318 | if (debug_view_valid) |
| 7319 | elf_release_view (state, &debug_view, error_callback, data); |
| 7320 | for (i = 0; i < (int) DEBUG_MAX; ++i) |
| 7321 | { |
| 7322 | if (split_debug_view_valid[i]) |
| 7323 | elf_release_view (state, &split_debug_view[i], error_callback, data); |
| 7324 | } |
| 7325 | if (opd_view_valid) |
| 7326 | elf_release_view (state, &opd->view, error_callback, data); |
| 7327 | if (descriptor >= 0) |
| 7328 | backtrace_close (descriptor, error_callback, data); |
| 7329 | return 0; |
| 7330 | } |
| 7331 | |
| 7332 | /* Data passed to phdr_callback. */ |
| 7333 | |
| 7334 | struct phdr_data |
| 7335 | { |
| 7336 | struct backtrace_state *state; |
| 7337 | backtrace_error_callback error_callback; |
| 7338 | void *data; |
| 7339 | fileline *fileline_fn; |
| 7340 | int *found_sym; |
| 7341 | int *found_dwarf; |
| 7342 | const char *exe_filename; |
| 7343 | int exe_descriptor; |
| 7344 | }; |
| 7345 | |
| 7346 | /* Callback passed to dl_iterate_phdr. Load debug info from shared |
| 7347 | libraries. */ |
| 7348 | |
| 7349 | static int |
| 7350 | #ifdef __i386__ |
| 7351 | __attribute__ ((__force_align_arg_pointer__)) |
| 7352 | #endif |
| 7353 | phdr_callback (struct dl_phdr_info *info, size_t size ATTRIBUTE_UNUSED, |
| 7354 | void *pdata) |
| 7355 | { |
| 7356 | struct phdr_data *pd = (struct phdr_data *) pdata; |
| 7357 | const char *filename; |
| 7358 | int descriptor; |
| 7359 | int does_not_exist; |
| 7360 | struct libbacktrace_base_address base_address; |
| 7361 | fileline elf_fileline_fn; |
| 7362 | int found_dwarf; |
| 7363 | |
| 7364 | /* There is not much we can do if we don't have the module name, |
| 7365 | unless executable is ET_DYN, where we expect the very first |
| 7366 | phdr_callback to be for the PIE. */ |
| 7367 | if (info->dlpi_name == NULL || info->dlpi_name[0] == '\0') |
| 7368 | { |
| 7369 | if (pd->exe_descriptor == -1) |
| 7370 | return 0; |
| 7371 | filename = pd->exe_filename; |
| 7372 | descriptor = pd->exe_descriptor; |
| 7373 | pd->exe_descriptor = -1; |
| 7374 | } |
| 7375 | else |
| 7376 | { |
| 7377 | if (pd->exe_descriptor != -1) |
| 7378 | { |
| 7379 | backtrace_close (pd->exe_descriptor, pd->error_callback, pd->data); |
| 7380 | pd->exe_descriptor = -1; |
| 7381 | } |
| 7382 | |
| 7383 | filename = info->dlpi_name; |
| 7384 | descriptor = backtrace_open (info->dlpi_name, pd->error_callback, |
| 7385 | pd->data, &does_not_exist); |
| 7386 | if (descriptor < 0) |
| 7387 | return 0; |
| 7388 | } |
| 7389 | |
| 7390 | base_address.m = info->dlpi_addr; |
| 7391 | if (elf_add (pd->state, filename, descriptor, NULL, 0, base_address, NULL, |
| 7392 | pd->error_callback, pd->data, &elf_fileline_fn, pd->found_sym, |
| 7393 | &found_dwarf, NULL, 0, 0, NULL, 0)) |
| 7394 | { |
| 7395 | if (found_dwarf) |
| 7396 | { |
| 7397 | *pd->found_dwarf = 1; |
| 7398 | *pd->fileline_fn = elf_fileline_fn; |
| 7399 | } |
| 7400 | } |
| 7401 | |
| 7402 | return 0; |
| 7403 | } |
| 7404 | |
| 7405 | /* Initialize the backtrace data we need from an ELF executable. At |
| 7406 | the ELF level, all we need to do is find the debug info |
| 7407 | sections. */ |
| 7408 | |
| 7409 | int |
| 7410 | backtrace_initialize (struct backtrace_state *state, const char *filename, |
| 7411 | int descriptor, backtrace_error_callback error_callback, |
| 7412 | void *data, fileline *fileline_fn) |
| 7413 | { |
| 7414 | int ret; |
| 7415 | int found_sym; |
| 7416 | int found_dwarf; |
| 7417 | fileline elf_fileline_fn = elf_nodebug; |
| 7418 | struct phdr_data pd; |
| 7419 | |
| 7420 | /* When using fdpic we must use dl_iterate_phdr for all modules, including |
| 7421 | the main executable, so that we can get the right base address |
| 7422 | mapping. */ |
| 7423 | if (!libbacktrace_using_fdpic ()) |
| 7424 | { |
| 7425 | struct libbacktrace_base_address zero_base_address; |
| 7426 | |
| 7427 | memset (&zero_base_address, 0, sizeof zero_base_address); |
| 7428 | ret = elf_add (state, filename, descriptor, NULL, 0, zero_base_address, |
| 7429 | NULL, error_callback, data, &elf_fileline_fn, &found_sym, |
| 7430 | &found_dwarf, NULL, 1, 0, NULL, 0); |
| 7431 | if (!ret) |
| 7432 | return 0; |
| 7433 | } |
| 7434 | |
| 7435 | pd.state = state; |
| 7436 | pd.error_callback = error_callback; |
| 7437 | pd.data = data; |
| 7438 | pd.fileline_fn = &elf_fileline_fn; |
| 7439 | pd.found_sym = &found_sym; |
| 7440 | pd.found_dwarf = &found_dwarf; |
| 7441 | pd.exe_filename = filename; |
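| | /* elf_add returns -1 for an ET_DYN executable (a PIE); in that case keep |
| | the descriptor so that phdr_callback can handle the executable once |
| | dl_iterate_phdr reports its load address. */ |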
| 7442 | pd.exe_descriptor = ret < 0 ? descriptor : -1; |
| 7443 | |
| 7444 | dl_iterate_phdr (phdr_callback, (void *) &pd); |
| 7445 | |
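| | /* Install the symbol lookup function. In the threaded case use atomic |
| | operations so that elf_nosyms never replaces a real symbol function |
| | installed by another thread. */ |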
| 7446 | if (!state->threaded) |
| 7447 | { |
| 7448 | if (found_sym) |
| 7449 | state->syminfo_fn = elf_syminfo; |
| 7450 | else if (state->syminfo_fn == NULL) |
| 7451 | state->syminfo_fn = elf_nosyms; |
| 7452 | } |
| 7453 | else |
| 7454 | { |
| 7455 | if (found_sym) |
| 7456 | backtrace_atomic_store_pointer (&state->syminfo_fn, elf_syminfo); |
| 7457 | else |
| 7458 | (void) __sync_bool_compare_and_swap (&state->syminfo_fn, NULL, |
| 7459 | elf_nosyms); |
| 7460 | } |
| 7461 | |
| 7462 | if (!state->threaded) |
| 7463 | *fileline_fn = state->fileline_fn; |
| 7464 | else |
| 7465 | *fileline_fn = backtrace_atomic_load_pointer (&state->fileline_fn); |
| 7466 | |
| 7467 | if (*fileline_fn == NULL || *fileline_fn == elf_nodebug) |
| 7468 | *fileline_fn = elf_fileline_fn; |
| 7469 | |
| 7470 | return 1; |
| 7471 | } |
| 7472 | |