1//===- InstrProf.cpp - Instrumented profiling format support --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains support for clang's instrumentation based PGO and
10// coverage.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/ProfileData/InstrProf.h"
15#include "llvm/ADT/ArrayRef.h"
16#include "llvm/ADT/SmallVector.h"
17#include "llvm/ADT/StringExtras.h"
18#include "llvm/ADT/StringRef.h"
19#include "llvm/Config/config.h"
20#include "llvm/IR/Constant.h"
21#include "llvm/IR/Constants.h"
22#include "llvm/IR/Function.h"
23#include "llvm/IR/GlobalValue.h"
24#include "llvm/IR/GlobalVariable.h"
25#include "llvm/IR/Instruction.h"
26#include "llvm/IR/LLVMContext.h"
27#include "llvm/IR/MDBuilder.h"
28#include "llvm/IR/Metadata.h"
29#include "llvm/IR/Module.h"
30#include "llvm/IR/Type.h"
31#include "llvm/ProfileData/InstrProfReader.h"
32#include "llvm/Support/Casting.h"
33#include "llvm/Support/CommandLine.h"
34#include "llvm/Support/Compiler.h"
35#include "llvm/Support/Compression.h"
36#include "llvm/Support/Debug.h"
37#include "llvm/Support/Endian.h"
38#include "llvm/Support/Error.h"
39#include "llvm/Support/ErrorHandling.h"
40#include "llvm/Support/LEB128.h"
41#include "llvm/Support/MathExtras.h"
42#include "llvm/Support/Path.h"
43#include "llvm/Support/SwapByteOrder.h"
44#include "llvm/Support/VirtualFileSystem.h"
45#include "llvm/Support/raw_ostream.h"
46#include "llvm/TargetParser/Triple.h"
47#include <algorithm>
48#include <cassert>
49#include <cstddef>
50#include <cstdint>
51#include <cstring>
52#include <memory>
53#include <string>
54#include <system_error>
55#include <type_traits>
56#include <utility>
57#include <vector>
58
59using namespace llvm;
60
61#define DEBUG_TYPE "instrprof"
62
// If true (the default), the full module path recorded in the module's
// source file name is used as the prefix for static-function counter names;
// if false, only the basename is used.
static cl::opt<bool> StaticFuncFullModulePrefix(
    "static-func-full-module-prefix", cl::init(Val: true), cl::Hidden,
    cl::desc("Use full module build paths in the profile counter names for "
             "static functions."));

// This option is tailored to users that have different top-level directory in
// profile-gen and profile-use compilation. Users need to specific the number
// of levels to strip. A value larger than the number of directories in the
// source file will strip all the directory names and only leave the basename.
//
// Note current ThinLTO module importing for the indirect-calls assumes
// the source directory name not being stripped. A non-zero option value here
// can potentially prevent some inter-module indirect-call-promotions.
static cl::opt<unsigned> StaticFuncStripDirNamePrefix(
    "static-func-strip-dirname-prefix", cl::init(Val: 0), cl::Hidden,
    cl::desc("Strip specified level of directory name from source path in "
             "the profile counter name for static functions."));
80
81static std::string getInstrProfErrString(instrprof_error Err,
82 const std::string &ErrMsg = "") {
83 std::string Msg;
84 raw_string_ostream OS(Msg);
85
86 switch (Err) {
87 case instrprof_error::success:
88 OS << "success";
89 break;
90 case instrprof_error::eof:
91 OS << "end of File";
92 break;
93 case instrprof_error::unrecognized_format:
94 OS << "unrecognized instrumentation profile encoding format";
95 break;
96 case instrprof_error::bad_magic:
97 OS << "invalid instrumentation profile data (bad magic)";
98 break;
99 case instrprof_error::bad_header:
100 OS << "invalid instrumentation profile data (file header is corrupt)";
101 break;
102 case instrprof_error::unsupported_version:
103 OS << "unsupported instrumentation profile format version";
104 break;
105 case instrprof_error::unsupported_hash_type:
106 OS << "unsupported instrumentation profile hash type";
107 break;
108 case instrprof_error::too_large:
109 OS << "too much profile data";
110 break;
111 case instrprof_error::truncated:
112 OS << "truncated profile data";
113 break;
114 case instrprof_error::malformed:
115 OS << "malformed instrumentation profile data";
116 break;
117 case instrprof_error::missing_correlation_info:
118 OS << "debug info/binary for correlation is required";
119 break;
120 case instrprof_error::unexpected_correlation_info:
121 OS << "debug info/binary for correlation is not necessary";
122 break;
123 case instrprof_error::unable_to_correlate_profile:
124 OS << "unable to correlate profile";
125 break;
126 case instrprof_error::invalid_prof:
127 OS << "invalid profile created. Please file a bug "
128 "at: " BUG_REPORT_URL
129 " and include the profraw files that caused this error.";
130 break;
131 case instrprof_error::unknown_function:
132 OS << "no profile data available for function";
133 break;
134 case instrprof_error::hash_mismatch:
135 OS << "function control flow change detected (hash mismatch)";
136 break;
137 case instrprof_error::count_mismatch:
138 OS << "function basic block count change detected (counter mismatch)";
139 break;
140 case instrprof_error::bitmap_mismatch:
141 OS << "function bitmap size change detected (bitmap size mismatch)";
142 break;
143 case instrprof_error::counter_overflow:
144 OS << "counter overflow";
145 break;
146 case instrprof_error::value_site_count_mismatch:
147 OS << "function value site count change detected (counter mismatch)";
148 break;
149 case instrprof_error::compress_failed:
150 OS << "failed to compress data (zlib)";
151 break;
152 case instrprof_error::uncompress_failed:
153 OS << "failed to uncompress data (zlib)";
154 break;
155 case instrprof_error::empty_raw_profile:
156 OS << "empty raw profile file";
157 break;
158 case instrprof_error::zlib_unavailable:
159 OS << "profile uses zlib compression but the profile reader was built "
160 "without zlib support";
161 break;
162 case instrprof_error::raw_profile_version_mismatch:
163 OS << "raw profile version mismatch";
164 break;
165 case instrprof_error::counter_value_too_large:
166 OS << "excessively large counter value suggests corrupted profile data";
167 break;
168 }
169
170 // If optional error message is not empty, append it to the message.
171 if (!ErrMsg.empty())
172 OS << ": " << ErrMsg;
173
174 return OS.str();
175}
176
177namespace {
178
179// FIXME: This class is only here to support the transition to llvm::Error. It
180// will be removed once this transition is complete. Clients should prefer to
181// deal with the Error value directly, rather than converting to error_code.
182class InstrProfErrorCategoryType : public std::error_category {
183 const char *name() const noexcept override { return "llvm.instrprof"; }
184
185 std::string message(int IE) const override {
186 return getInstrProfErrString(Err: static_cast<instrprof_error>(IE));
187 }
188};
189
190} // end anonymous namespace
191
192const std::error_category &llvm::instrprof_category() {
193 static InstrProfErrorCategoryType ErrorCategory;
194 return ErrorCategory;
195}
196
namespace {

// Section names used on ELF and other non-COFF/non-MachO object formats,
// one entry per InstrProfSectKind, expanded from InstrProfData.inc.
const char *InstrProfSectNameCommon[] = {
#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix)      \
  SectNameCommon,
#include "llvm/ProfileData/InstrProfData.inc"
};

// Section names used on COFF, one entry per InstrProfSectKind.
const char *InstrProfSectNameCoff[] = {
#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix)      \
  SectNameCoff,
#include "llvm/ProfileData/InstrProfData.inc"
};

// MachO segment prefixes, one entry per InstrProfSectKind; prepended when
// AddSegmentInfo is requested in getInstrProfSectionName().
const char *InstrProfSectNamePrefix[] = {
#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix)      \
  Prefix,
#include "llvm/ProfileData/InstrProfData.inc"
};

} // namespace
218
219namespace llvm {
220
// Whether the PGO name strings emitted into binaries/profiles are
// zlib-compressed (takes effect only when zlib support is compiled in; see
// collectPGOFuncNameStrings below).
cl::opt<bool> DoInstrProfNameCompression(
    "enable-name-compression",
    cl::desc("Enable name/filename string compression"), cl::init(Val: true));

cl::opt<bool> EnableVTableValueProfiling(
    "enable-vtable-value-profiling", cl::init(Val: false),
    cl::desc("If true, the virtual table address will be instrumented to know "
             "the types of a C++ pointer. The information is used in indirect "
             "call promotion to do selective vtable-based comparison."));

cl::opt<bool> EnableVTableProfileUse(
    "enable-vtable-profile-use", cl::init(Val: false),
    cl::desc("If ThinLTO and WPD is enabled and this option is true, vtable "
             "profiles will be used by ICP pass for more efficient indirect "
             "call sequence. If false, type profiles won't be used."));
236
237std::string getInstrProfSectionName(InstrProfSectKind IPSK,
238 Triple::ObjectFormatType OF,
239 bool AddSegmentInfo) {
240 std::string SectName;
241
242 if (OF == Triple::MachO && AddSegmentInfo)
243 SectName = InstrProfSectNamePrefix[IPSK];
244
245 if (OF == Triple::COFF)
246 SectName += InstrProfSectNameCoff[IPSK];
247 else
248 SectName += InstrProfSectNameCommon[IPSK];
249
250 if (OF == Triple::MachO && IPSK == IPSK_data && AddSegmentInfo)
251 SectName += ",regular,live_support";
252
253 return SectName;
254}
255
// Render this error (its code plus the optional detail string) as a
// human-readable message.
std::string InstrProfError::message() const {
  return getInstrProfErrString(Err, ErrMsg: Msg);
}

// Static anchor for llvm::ErrorInfo's RTTI mechanism.
char InstrProfError::ID = 0;
261
// ProfOStream wraps either a file-backed or an in-memory stream behind a
// little-endian writer; IsFDOStream records which kind so patch() can pick
// the matching back-patching strategy.
ProfOStream::ProfOStream(raw_fd_ostream &FD)
    : IsFDOStream(true), OS(FD), LE(FD, llvm::endianness::little) {}

ProfOStream::ProfOStream(raw_string_ostream &STR)
    : IsFDOStream(false), OS(STR), LE(STR, llvm::endianness::little) {}

// Current absolute offset in the underlying stream.
uint64_t ProfOStream::tell() const { return OS.tell(); }
// Emit one little-endian value of the respective width.
void ProfOStream::write(uint64_t V) { LE.write<uint64_t>(Val: V); }
void ProfOStream::write32(uint32_t V) { LE.write<uint32_t>(Val: V); }
void ProfOStream::writeByte(uint8_t V) { LE.write<uint8_t>(Val: V); }
272
// Back-patch previously emitted data: for each PatchItem, overwrite the
// little-endian uint64_t words in K.D starting at absolute stream offset
// K.Pos.
void ProfOStream::patch(ArrayRef<PatchItem> P) {
  using namespace support;

  if (IsFDOStream) {
    // File-backed stream: seek to each patch position and rewrite in place.
    raw_fd_ostream &FDOStream = static_cast<raw_fd_ostream &>(OS);
    const uint64_t LastPos = FDOStream.tell();
    for (const auto &K : P) {
      FDOStream.seek(off: K.Pos);
      for (uint64_t Elem : K.D)
        write(V: Elem);
    }
    // Reset the stream to the last position after patching so that users
    // don't accidentally overwrite data. This makes it consistent with
    // the string stream below which replaces the data directly.
    FDOStream.seek(off: LastPos);
  } else {
    // In-memory stream: splice the bytes directly into the backing string,
    // converting each word to little-endian first.
    raw_string_ostream &SOStream = static_cast<raw_string_ostream &>(OS);
    std::string &Data = SOStream.str(); // with flush
    for (const auto &K : P) {
      for (int I = 0, E = K.D.size(); I != E; I++) {
        uint64_t Bytes =
            endian::byte_swap<uint64_t, llvm::endianness::little>(value: K.D[I]);
        Data.replace(pos: K.Pos + I * sizeof(uint64_t), n1: sizeof(uint64_t),
                     s: (const char *)&Bytes, n2: sizeof(uint64_t));
      }
    }
  }
}
301
302std::string getPGOFuncName(StringRef Name, GlobalValue::LinkageTypes Linkage,
303 StringRef FileName,
304 uint64_t Version LLVM_ATTRIBUTE_UNUSED) {
305 // Value names may be prefixed with a binary '1' to indicate
306 // that the backend should not modify the symbols due to any platform
307 // naming convention. Do not include that '1' in the PGO profile name.
308 if (Name[0] == '\1')
309 Name = Name.substr(Start: 1);
310
311 std::string NewName = std::string(Name);
312 if (llvm::GlobalValue::isLocalLinkage(Linkage)) {
313 // For local symbols, prepend the main file name to distinguish them.
314 // Do not include the full path in the file name since there's no guarantee
315 // that it will stay the same, e.g., if the files are checked out from
316 // version control in different locations.
317 if (FileName.empty())
318 NewName = NewName.insert(pos: 0, s: "<unknown>:");
319 else
320 NewName = NewName.insert(pos1: 0, str: FileName.str() + ":");
321 }
322 return NewName;
323}
324
325// Strip NumPrefix level of directory name from PathNameStr. If the number of
326// directory separators is less than NumPrefix, strip all the directories and
327// leave base file name only.
328static StringRef stripDirPrefix(StringRef PathNameStr, uint32_t NumPrefix) {
329 uint32_t Count = NumPrefix;
330 uint32_t Pos = 0, LastPos = 0;
331 for (const auto &CI : PathNameStr) {
332 ++Pos;
333 if (llvm::sys::path::is_separator(value: CI)) {
334 LastPos = Pos;
335 --Count;
336 }
337 if (Count == 0)
338 break;
339 }
340 return PathNameStr.substr(Start: LastPos);
341}
342
343static StringRef getStrippedSourceFileName(const GlobalObject &GO) {
344 StringRef FileName(GO.getParent()->getSourceFileName());
345 uint32_t StripLevel = StaticFuncFullModulePrefix ? 0 : (uint32_t)-1;
346 if (StripLevel < StaticFuncStripDirNamePrefix)
347 StripLevel = StaticFuncStripDirNamePrefix;
348 if (StripLevel)
349 FileName = stripDirPrefix(PathNameStr: FileName, NumPrefix: StripLevel);
350 return FileName;
351}
352
353// The PGO name has the format [<filepath>;]<mangled-name> where <filepath>; is
354// provided if linkage is local and is used to discriminate possibly identical
355// mangled names. ";" is used because it is unlikely to be found in either
356// <filepath> or <mangled-name>.
357//
358// Older compilers used getPGOFuncName() which has the format
359// [<filepath>:]<mangled-name>. This caused trouble for Objective-C functions
360// which commonly have :'s in their names. We still need to compute this name to
361// lookup functions from profiles built by older compilers.
// Build the "[<filepath>;]<mangled-name>" form described in the comment
// above by delegating to the shared GlobalValue identifier mangling (the
// same scheme used for GUID computation).
static std::string
getIRPGONameForGlobalObject(const GlobalObject &GO,
                            GlobalValue::LinkageTypes Linkage,
                            StringRef FileName) {
  return GlobalValue::getGlobalIdentifier(Name: GO.getName(), Linkage, FileName);
}
368
369static std::optional<std::string> lookupPGONameFromMetadata(MDNode *MD) {
370 if (MD != nullptr) {
371 StringRef S = cast<MDString>(Val: MD->getOperand(I: 0))->getString();
372 return S.str();
373 }
374 return {};
375}
376
377// Returns the PGO object name. This function has some special handling
378// when called in LTO optimization. The following only applies when calling in
379// LTO passes (when \c InLTO is true): LTO's internalization privatizes many
380// global linkage symbols. This happens after value profile annotation, but
381// those internal linkage functions should not have a source prefix.
382// Additionally, for ThinLTO mode, exported internal functions are promoted
383// and renamed. We need to ensure that the original internal PGO name is
384// used when computing the GUID that is compared against the profiled GUIDs.
385// To differentiate compiler generated internal symbols from original ones,
386// PGOFuncName meta data are created and attached to the original internal
387// symbols in the value profile annotation step
388// (PGOUseFunc::annotateIndirectCallSites). If a symbol does not have the meta
389// data, its original linkage must be non-internal.
390static std::string getIRPGOObjectName(const GlobalObject &GO, bool InLTO,
391 MDNode *PGONameMetadata) {
392 if (!InLTO) {
393 auto FileName = getStrippedSourceFileName(GO);
394 return getIRPGONameForGlobalObject(GO, Linkage: GO.getLinkage(), FileName);
395 }
396
397 // In LTO mode (when InLTO is true), first check if there is a meta data.
398 if (auto IRPGOFuncName = lookupPGONameFromMetadata(MD: PGONameMetadata))
399 return *IRPGOFuncName;
400
401 // If there is no meta data, the function must be a global before the value
402 // profile annotation pass. Its current linkage may be internal if it is
403 // internalized in LTO mode.
404 return getIRPGONameForGlobalObject(GO, Linkage: GlobalValue::ExternalLinkage, FileName: "");
405}
406
407// Returns the IRPGO function name and does special handling when called
408// in LTO optimization. See the comments of `getIRPGOObjectName` for details.
std::string getIRPGOFuncName(const Function &F, bool InLTO) {
  // Delegate to the common GlobalObject path; in LTO the PGOFuncName
  // metadata (if present) supplies the original pre-LTO name.
  return getIRPGOObjectName(GO: F, InLTO, PGONameMetadata: getPGOFuncNameMetadata(F));
}
412
413// Please use getIRPGOFuncName for LLVM IR instrumentation. This function is
414// for front-end (Clang, etc) instrumentation.
415// The implementation is kept for profile matching from older profiles.
416// This is similar to `getIRPGOFuncName` except that this function calls
417// 'getPGOFuncName' to get a name and `getIRPGOFuncName` calls
418// 'getIRPGONameForGlobalObject'. See the difference between two callees in the
419// comments of `getIRPGONameForGlobalObject`.
420std::string getPGOFuncName(const Function &F, bool InLTO, uint64_t Version) {
421 if (!InLTO) {
422 auto FileName = getStrippedSourceFileName(GO: F);
423 return getPGOFuncName(Name: F.getName(), Linkage: F.getLinkage(), FileName, Version);
424 }
425
426 // In LTO mode (when InLTO is true), first check if there is a meta data.
427 if (auto PGOFuncName = lookupPGONameFromMetadata(MD: getPGOFuncNameMetadata(F)))
428 return *PGOFuncName;
429
430 // If there is no meta data, the function must be a global before the value
431 // profile annotation pass. Its current linkage may be internal if it is
432 // internalized in LTO mode.
433 return getPGOFuncName(Name: F.getName(), Linkage: GlobalValue::ExternalLinkage, FileName: "");
434}
435
// Return the PGO name for vtable variable \p V.
std::string getPGOName(const GlobalVariable &V, bool InLTO) {
  // PGONameMetadata should be set by compiler at profile use time
  // and read by symtab creation to look up symbols corresponding to
  // a MD5 hash.
  return getIRPGOObjectName(GO: V, InLTO, PGONameMetadata: V.getMetadata(Kind: getPGONameMetadataName()));
}
442
443// See getIRPGOObjectName() for a discription of the format.
444std::pair<StringRef, StringRef> getParsedIRPGOName(StringRef IRPGOName) {
445 auto [FileName, MangledName] = IRPGOName.split(Separator: GlobalIdentifierDelimiter);
446 if (MangledName.empty())
447 return std::make_pair(x: StringRef(), y&: IRPGOName);
448 return std::make_pair(x&: FileName, y&: MangledName);
449}
450
451StringRef getFuncNameWithoutPrefix(StringRef PGOFuncName, StringRef FileName) {
452 if (FileName.empty())
453 return PGOFuncName;
454 // Drop the file name including ':' or ';'. See getIRPGONameForGlobalObject as
455 // well.
456 if (PGOFuncName.starts_with(Prefix: FileName))
457 PGOFuncName = PGOFuncName.drop_front(N: FileName.size() + 1);
458 return PGOFuncName;
459}
460
461// \p FuncName is the string used as profile lookup key for the function. A
462// symbol is created to hold the name. Return the legalized symbol name.
463std::string getPGOFuncNameVarName(StringRef FuncName,
464 GlobalValue::LinkageTypes Linkage) {
465 std::string VarName = std::string(getInstrProfNameVarPrefix());
466 VarName += FuncName;
467
468 if (!GlobalValue::isLocalLinkage(Linkage))
469 return VarName;
470
471 // Now fix up illegal chars in local VarName that may upset the assembler.
472 const char InvalidChars[] = "-:;<>/\"'";
473 size_t FoundPos = VarName.find_first_of(s: InvalidChars);
474 while (FoundPos != std::string::npos) {
475 VarName[FoundPos] = '_';
476 FoundPos = VarName.find_first_of(s: InvalidChars, pos: FoundPos + 1);
477 }
478 return VarName;
479}
480
481bool isGPUProfTarget(const Module &M) {
482 const Triple &T = M.getTargetTriple();
483 return T.isGPU();
484}
485
486void setPGOFuncVisibility(Module &M, GlobalVariable *FuncNameVar) {
487 // If the target is a GPU, make the symbol protected so it can
488 // be read from the host device
489 if (isGPUProfTarget(M))
490 FuncNameVar->setVisibility(GlobalValue::ProtectedVisibility);
491 // Hide the symbol so that we correctly get a copy for each executable.
492 else if (!GlobalValue::isLocalLinkage(Linkage: FuncNameVar->getLinkage()))
493 FuncNameVar->setVisibility(GlobalValue::HiddenVisibility);
494}
495
496GlobalVariable *createPGOFuncNameVar(Module &M,
497 GlobalValue::LinkageTypes Linkage,
498 StringRef PGOFuncName) {
499 // Ensure profiling variables on GPU are visible to be read from host
500 if (isGPUProfTarget(M))
501 Linkage = GlobalValue::ExternalLinkage;
502 // We generally want to match the function's linkage, but available_externally
503 // and extern_weak both have the wrong semantics, and anything that doesn't
504 // need to link across compilation units doesn't need to be visible at all.
505 else if (Linkage == GlobalValue::ExternalWeakLinkage)
506 Linkage = GlobalValue::LinkOnceAnyLinkage;
507 else if (Linkage == GlobalValue::AvailableExternallyLinkage)
508 Linkage = GlobalValue::LinkOnceODRLinkage;
509 else if (Linkage == GlobalValue::InternalLinkage ||
510 Linkage == GlobalValue::ExternalLinkage)
511 Linkage = GlobalValue::PrivateLinkage;
512
513 auto *Value =
514 ConstantDataArray::getString(Context&: M.getContext(), Initializer: PGOFuncName, AddNull: false);
515 auto *FuncNameVar =
516 new GlobalVariable(M, Value->getType(), true, Linkage, Value,
517 getPGOFuncNameVarName(FuncName: PGOFuncName, Linkage));
518
519 setPGOFuncVisibility(M, FuncNameVar);
520 return FuncNameVar;
521}
522
// Convenience overload: create the name variable in \p F's module using
// \p F's own linkage.
GlobalVariable *createPGOFuncNameVar(Function &F, StringRef PGOFuncName) {
  return createPGOFuncNameVar(M&: *F.getParent(), Linkage: F.getLinkage(), PGOFuncName);
}
526
// Populate the symbol table from module \p M: every named function is
// registered under both its IRPGO name and its legacy PGO name, and every
// named global carrying !type metadata is registered as a vtable.
Error InstrProfSymtab::create(Module &M, bool InLTO, bool AddCanonical) {
  for (Function &F : M) {
    // Function may not have a name: like using asm("") to overwrite the name.
    // Ignore in this case.
    if (!F.hasName())
      continue;
    if (Error E = addFuncWithName(F, PGOFuncName: getIRPGOFuncName(F, InLTO), AddCanonical))
      return E;
    // Also use getPGOFuncName() so that we can find records from older profiles
    if (Error E = addFuncWithName(F, PGOFuncName: getPGOFuncName(F, InLTO), AddCanonical))
      return E;
  }

  for (GlobalVariable &G : M.globals()) {
    // Only named globals with type metadata participate in vtable profiling.
    if (!G.hasName() || !G.hasMetadata(KindID: LLVMContext::MD_type))
      continue;
    if (Error E = addVTableWithName(V&: G, PGOVTableName: getPGOName(V: G, InLTO)))
      return E;
  }

  // Newly added entries invalidate any previous ordering; rebuild it.
  Sorted = false;
  finalizeSymtab();
  return Error::success();
}
551
// Register \p VTable under \p VTablePGOName — and, when it differs, under its
// canonical (suffix-stripped) name — recording the GUID -> vtable mapping.
Error InstrProfSymtab::addVTableWithName(GlobalVariable &VTable,
                                         StringRef VTablePGOName) {
  auto NameToGUIDMap = [&](StringRef Name) -> Error {
    if (Error E = addSymbolName(SymbolName: Name))
      return E;

    bool Inserted = true;
    std::tie(args: std::ignore, args&: Inserted) = MD5VTableMap.try_emplace(
        Key: GlobalValue::getGUIDAssumingExternalLinkage(GlobalName: Name), Args: &VTable);
    // A duplicate GUID is tolerated: the first mapping wins, and the
    // conflict is only reported in debug builds.
    if (!Inserted)
      LLVM_DEBUG(dbgs() << "GUID conflict within one module");
    return Error::success();
  };
  if (Error E = NameToGUIDMap(VTablePGOName))
    return E;

  // Also register the canonical name when stripping changed it; see
  // getCanonicalName().
  StringRef CanonicalName = getCanonicalName(PGOName: VTablePGOName);
  if (CanonicalName != VTablePGOName)
    return NameToGUIDMap(CanonicalName);

  return Error::success();
}
574
575Error readAndDecodeStrings(StringRef NameStrings,
576 std::function<Error(StringRef)> NameCallback) {
577 const uint8_t *P = NameStrings.bytes_begin();
578 const uint8_t *EndP = NameStrings.bytes_end();
579 while (P < EndP) {
580 uint32_t N;
581 uint64_t UncompressedSize = decodeULEB128(p: P, n: &N);
582 P += N;
583 uint64_t CompressedSize = decodeULEB128(p: P, n: &N);
584 P += N;
585 const bool IsCompressed = (CompressedSize != 0);
586 SmallVector<uint8_t, 128> UncompressedNameStrings;
587 StringRef NameStrings;
588 if (IsCompressed) {
589 if (!llvm::compression::zlib::isAvailable())
590 return make_error<InstrProfError>(Args: instrprof_error::zlib_unavailable);
591
592 if (Error E = compression::zlib::decompress(Input: ArrayRef(P, CompressedSize),
593 Output&: UncompressedNameStrings,
594 UncompressedSize)) {
595 consumeError(Err: std::move(E));
596 return make_error<InstrProfError>(Args: instrprof_error::uncompress_failed);
597 }
598 P += CompressedSize;
599 NameStrings = toStringRef(Input: UncompressedNameStrings);
600 } else {
601 NameStrings =
602 StringRef(reinterpret_cast<const char *>(P), UncompressedSize);
603 P += UncompressedSize;
604 }
605 // Now parse the name strings.
606 SmallVector<StringRef, 0> Names;
607 NameStrings.split(A&: Names, Separator: getInstrProfNameSeparator());
608 for (StringRef &Name : Names)
609 if (Error E = NameCallback(Name))
610 return E;
611
612 while (P < EndP && *P == 0)
613 P++;
614 }
615 return Error::success();
616}
617
618Error InstrProfSymtab::create(StringRef NameStrings) {
619 return readAndDecodeStrings(
620 NameStrings,
621 NameCallback: std::bind(f: &InstrProfSymtab::addFuncName, args: this, args: std::placeholders::_1));
622}
623
624Error InstrProfSymtab::create(StringRef FuncNameStrings,
625 StringRef VTableNameStrings) {
626 if (Error E = readAndDecodeStrings(NameStrings: FuncNameStrings,
627 NameCallback: std::bind(f: &InstrProfSymtab::addFuncName,
628 args: this, args: std::placeholders::_1)))
629 return E;
630
631 return readAndDecodeStrings(
632 NameStrings: VTableNameStrings,
633 NameCallback: std::bind(f: &InstrProfSymtab::addVTableName, args: this, args: std::placeholders::_1));
634}
635
636Error InstrProfSymtab::initVTableNamesFromCompressedStrings(
637 StringRef CompressedVTableStrings) {
638 return readAndDecodeStrings(
639 NameStrings: CompressedVTableStrings,
640 NameCallback: std::bind(f: &InstrProfSymtab::addVTableName, args: this, args: std::placeholders::_1));
641}
642
643StringRef InstrProfSymtab::getCanonicalName(StringRef PGOName) {
644 // In ThinLTO, local function may have been promoted to global and have
645 // suffix ".llvm." added to the function name. We need to add the
646 // stripped function name to the symbol table so that we can find a match
647 // from profile.
648 //
649 // ".__uniq." suffix is used to differentiate internal linkage functions in
650 // different modules and should be kept. This is the only suffix with the
651 // pattern ".xxx" which is kept before matching, other suffixes similar as
652 // ".llvm." will be stripped.
653 const std::string UniqSuffix = ".__uniq.";
654 size_t Pos = PGOName.find(Str: UniqSuffix);
655 if (Pos != StringRef::npos)
656 Pos += UniqSuffix.length();
657 else
658 Pos = 0;
659
660 // Search '.' after ".__uniq." if ".__uniq." exists, otherwise search '.' from
661 // the beginning.
662 Pos = PGOName.find(C: '.', From: Pos);
663 if (Pos != StringRef::npos && Pos != 0)
664 return PGOName.substr(Start: 0, N: Pos);
665
666 return PGOName;
667}
668
// Register function \p F under \p PGOFuncName, recording the GUID ->
// Function mapping; when \p AddCanonical is set, also register the
// suffix-stripped canonical name (see getCanonicalName()).
Error InstrProfSymtab::addFuncWithName(Function &F, StringRef PGOFuncName,
                                       bool AddCanonical) {
  auto NameToGUIDMap = [&](StringRef Name) -> Error {
    if (Error E = addFuncName(FuncName: Name))
      return E;
    MD5FuncMap.emplace_back(args: Function::getGUIDAssumingExternalLinkage(GlobalName: Name), args: &F);
    return Error::success();
  };
  if (Error E = NameToGUIDMap(PGOFuncName))
    return E;

  if (!AddCanonical)
    return Error::success();

  // Only register the canonical name when stripping actually changed it.
  StringRef CanonicalFuncName = getCanonicalName(PGOName: PGOFuncName);
  if (CanonicalFuncName != PGOFuncName)
    return NameToGUIDMap(CanonicalFuncName);

  return Error::success();
}
689
// Resolve a runtime vtable address to its profile hash.
uint64_t InstrProfSymtab::getVTableHashFromAddress(uint64_t Address) {
  // Given a runtime address, look up the hash value in the interval map, and
  // fallback to value 0 if a hash value is not found.
  return VTableAddrMap.lookup(x: Address, NotFound: 0);
}
695
696uint64_t InstrProfSymtab::getFunctionHashFromAddress(uint64_t Address) {
697 finalizeSymtab();
698 auto It = partition_point(Range&: AddrToMD5Map, P: [=](std::pair<uint64_t, uint64_t> A) {
699 return A.first < Address;
700 });
701 // Raw function pointer collected by value profiler may be from
702 // external functions that are not instrumented. They won't have
703 // mapping data to be used by the deserializer. Force the value to
704 // be 0 in this case.
705 if (It != AddrToMD5Map.end() && It->first == Address)
706 return (uint64_t)It->second;
707 return 0;
708}
709
710void InstrProfSymtab::dumpNames(raw_ostream &OS) const {
711 SmallVector<StringRef, 0> Sorted(NameTab.keys());
712 llvm::sort(C&: Sorted);
713 for (StringRef S : Sorted)
714 OS << S << '\n';
715}
716
// Serialize \p NameStrs into \p Result as one length-prefixed block: two
// ULEB128 integers (uncompressed payload size, then compressed size — 0 for
// an uncompressed payload), followed by the payload. The payload is the
// names joined with getInstrProfNameSeparator(), zlib-compressed when
// \p DoCompression is set. Writer-side counterpart of readAndDecodeStrings().
Error collectGlobalObjectNameStrings(ArrayRef<std::string> NameStrs,
                                     bool DoCompression, std::string &Result) {
  assert(!NameStrs.empty() && "No name data to emit");

  // 20 bytes holds two ULEB128-encoded uint64_t values (at most 10 bytes
  // each).
  uint8_t Header[20], *P = Header;
  std::string UncompressedNameStrings =
      join(Begin: NameStrs.begin(), End: NameStrs.end(), Separator: getInstrProfNameSeparator());

  assert(StringRef(UncompressedNameStrings)
             .count(getInstrProfNameSeparator()) == (NameStrs.size() - 1) &&
         "PGO name is invalid (contains separator token)");

  unsigned EncLen = encodeULEB128(Value: UncompressedNameStrings.length(), p: P);
  P += EncLen;

  // Append the second header field (the compressed length, 0 when raw) and
  // then the payload itself.
  auto WriteStringToResult = [&](size_t CompressedLen, StringRef InputStr) {
    EncLen = encodeULEB128(Value: CompressedLen, p: P);
    P += EncLen;
    char *HeaderStr = reinterpret_cast<char *>(&Header[0]);
    unsigned HeaderLen = P - &Header[0];
    Result.append(s: HeaderStr, n: HeaderLen);
    Result += InputStr;
    return Error::success();
  };

  if (!DoCompression) {
    return WriteStringToResult(0, UncompressedNameStrings);
  }

  SmallVector<uint8_t, 128> CompressedNameStrings;
  compression::zlib::compress(Input: arrayRefFromStringRef(Input: UncompressedNameStrings),
                              CompressedBuffer&: CompressedNameStrings,
                              Level: compression::zlib::BestSizeCompression);

  return WriteStringToResult(CompressedNameStrings.size(),
                             toStringRef(Input: CompressedNameStrings));
}
754
755StringRef getPGOFuncNameVarInitializer(GlobalVariable *NameVar) {
756 auto *Arr = cast<ConstantDataArray>(Val: NameVar->getInitializer());
757 StringRef NameStr =
758 Arr->isCString() ? Arr->getAsCString() : Arr->getAsString();
759 return NameStr;
760}
761
762Error collectPGOFuncNameStrings(ArrayRef<GlobalVariable *> NameVars,
763 std::string &Result, bool DoCompression) {
764 std::vector<std::string> NameStrs;
765 for (auto *NameVar : NameVars) {
766 NameStrs.push_back(x: std::string(getPGOFuncNameVarInitializer(NameVar)));
767 }
768 return collectGlobalObjectNameStrings(
769 NameStrs, DoCompression: compression::zlib::isAvailable() && DoCompression, Result);
770}
771
772Error collectVTableStrings(ArrayRef<GlobalVariable *> VTables,
773 std::string &Result, bool DoCompression) {
774 std::vector<std::string> VTableNameStrs;
775 for (auto *VTable : VTables)
776 VTableNameStrs.push_back(x: getPGOName(V: *VTable));
777 return collectGlobalObjectNameStrings(
778 NameStrs: VTableNameStrs, DoCompression: compression::zlib::isAvailable() && DoCompression,
779 Result);
780}
781
782void InstrProfRecord::accumulateCounts(CountSumOrPercent &Sum) const {
783 uint64_t FuncSum = 0;
784 Sum.NumEntries += Counts.size();
785 for (uint64_t Count : Counts)
786 FuncSum += Count;
787 Sum.CountSum += FuncSum;
788
789 for (uint32_t VK = IPVK_First; VK <= IPVK_Last; ++VK) {
790 uint64_t KindSum = 0;
791 uint32_t NumValueSites = getNumValueSites(ValueKind: VK);
792 for (size_t I = 0; I < NumValueSites; ++I) {
793 for (const auto &V : getValueArrayForSite(ValueKind: VK, Site: I))
794 KindSum += V.Count;
795 }
796 Sum.ValueCounts[VK] += KindSum;
797 }
798}
799
// Merge-style walk over the two (sorted) value-data arrays, accumulating the
// overlap score for every target value present in both records.
void InstrProfValueSiteRecord::overlap(InstrProfValueSiteRecord &Input,
                                       uint32_t ValueKind,
                                       OverlapStats &Overlap,
                                       OverlapStats &FuncLevelOverlap) {
  // Sort both sides by target value so a single linear pass visits all
  // common values.
  this->sortByTargetValues();
  Input.sortByTargetValues();
  double Score = 0.0f, FuncLevelScore = 0.0f;
  auto I = ValueData.begin();
  auto IE = ValueData.end();
  auto J = Input.ValueData.begin();
  auto JE = Input.ValueData.end();
  while (I != IE && J != JE) {
    if (I->Value == J->Value) {
      // Same target on both sides: score it at both program and function
      // level; ++I here, J is advanced by the shared ++J below.
      Score += OverlapStats::score(Val1: I->Count, Val2: J->Count,
                                   Sum1: Overlap.Base.ValueCounts[ValueKind],
                                   Sum2: Overlap.Test.ValueCounts[ValueKind]);
      FuncLevelScore += OverlapStats::score(
          Val1: I->Count, Val2: J->Count, Sum1: FuncLevelOverlap.Base.ValueCounts[ValueKind],
          Sum2: FuncLevelOverlap.Test.ValueCounts[ValueKind]);
      ++I;
    } else if (I->Value < J->Value) {
      // Value present only in this record: skip it without consuming J.
      ++I;
      continue;
    }
    ++J;
  }
  Overlap.Overlap.ValueCounts[ValueKind] += Score;
  FuncLevelOverlap.Overlap.ValueCounts[ValueKind] += FuncLevelScore;
}
829
// Overlap the value-profile data of kind \p ValueKind between this record
// and \p Other, accumulating the scores into \p Overlap and
// \p FuncLevelOverlap. Both records are expected to have the same number of
// value sites for this kind (asserted below).
void InstrProfRecord::overlapValueProfData(uint32_t ValueKind,
                                           InstrProfRecord &Other,
                                           OverlapStats &Overlap,
                                           OverlapStats &FuncLevelOverlap) {
  uint32_t ThisNumValueSites = getNumValueSites(ValueKind);
  assert(ThisNumValueSites == Other.getNumValueSites(ValueKind));
  if (!ThisNumValueSites)
    return;

  std::vector<InstrProfValueSiteRecord> &ThisSiteRecords =
      getOrCreateValueSitesForKind(ValueKind);
  MutableArrayRef<InstrProfValueSiteRecord> OtherSiteRecords =
      Other.getValueSitesForKind(ValueKind);
  // Overlap the two profiles site by site.
  for (uint32_t I = 0; I < ThisNumValueSites; I++)
    ThisSiteRecords[I].overlap(Input&: OtherSiteRecords[I], ValueKind, Overlap,
                               FuncLevelOverlap);
}
848
// Compute the overlap between this record (the "base" profile) and \p Other
// (the "test" profile), accumulating both program-level and function-level
// statistics. Counts below \p ValueCutoff do not contribute to the
// function-level result.
void InstrProfRecord::overlap(InstrProfRecord &Other, OverlapStats &Overlap,
                              OverlapStats &FuncLevelOverlap,
                              uint64_t ValueCutoff) {
  // FuncLevel CountSum for Other should already be computed and nonzero.
  assert(FuncLevelOverlap.Test.CountSum >= 1.0f);
  accumulateCounts(Sum&: FuncLevelOverlap.Base);
  bool Mismatch = (Counts.size() != Other.Counts.size());

  // Check if the value profiles mismatch (differing site counts for any
  // value kind also counts as a mismatch).
  if (!Mismatch) {
    for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind) {
      uint32_t ThisNumValueSites = getNumValueSites(ValueKind: Kind);
      uint32_t OtherNumValueSites = Other.getNumValueSites(ValueKind: Kind);
      if (ThisNumValueSites != OtherNumValueSites) {
        Mismatch = true;
        break;
      }
    }
  }
  if (Mismatch) {
    Overlap.addOneMismatch(MismatchFunc: FuncLevelOverlap.Test);
    return;
  }

  // Compute overlap for value counts.
  for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
    overlapValueProfData(ValueKind: Kind, Other, Overlap, FuncLevelOverlap);

  double Score = 0.0;
  uint64_t MaxCount = 0;
  // Compute overlap for edge counts; track the max test-side count to decide
  // whether this function is hot enough for function-level reporting.
  for (size_t I = 0, E = Other.Counts.size(); I < E; ++I) {
    Score += OverlapStats::score(Val1: Counts[I], Val2: Other.Counts[I],
                                 Sum1: Overlap.Base.CountSum, Sum2: Overlap.Test.CountSum);
    MaxCount = std::max(a: Other.Counts[I], b: MaxCount);
  }
  Overlap.Overlap.CountSum += Score;
  Overlap.Overlap.NumEntries += 1;

  // Function-level stats are recorded only for functions above the cutoff.
  if (MaxCount >= ValueCutoff) {
    double FuncScore = 0.0;
    for (size_t I = 0, E = Other.Counts.size(); I < E; ++I)
      FuncScore += OverlapStats::score(Val1: Counts[I], Val2: Other.Counts[I],
                                       Sum1: FuncLevelOverlap.Base.CountSum,
                                       Sum2: FuncLevelOverlap.Test.CountSum);
    FuncLevelOverlap.Overlap.CountSum = FuncScore;
    FuncLevelOverlap.Overlap.NumEntries = Other.Counts.size();
    FuncLevelOverlap.Valid = true;
  }
}
899
// Merge \p Input's value data into this site, scaling the incoming counts by
// \p Weight. Entries with equal target values are combined with saturating
// arithmetic; \p Warn is invoked on counter overflow.
void InstrProfValueSiteRecord::merge(InstrProfValueSiteRecord &Input,
                                     uint64_t Weight,
                                     function_ref<void(instrprof_error)> Warn) {
  // Sort both sides so a single merge pass over sorted sequences suffices.
  this->sortByTargetValues();
  Input.sortByTargetValues();
  auto I = ValueData.begin();
  auto IE = ValueData.end();
  std::vector<InstrProfValueData> Merged;
  Merged.reserve(n: std::max(a: ValueData.size(), b: Input.ValueData.size()));
  for (const InstrProfValueData &J : Input.ValueData) {
    // Copy over entries unique to this record that sort before J.
    while (I != IE && I->Value < J.Value) {
      Merged.push_back(x: *I);
      ++I;
    }
    if (I != IE && I->Value == J.Value) {
      // Shared target: add J's weighted count into I's, saturating.
      bool Overflowed;
      I->Count = SaturatingMultiplyAdd(X: J.Count, Y: Weight, A: I->Count, ResultOverflowed: &Overflowed);
      if (Overflowed)
        Warn(instrprof_error::counter_overflow);
      Merged.push_back(x: *I);
      ++I;
      continue;
    }
    // Target unique to Input; note its count is intentionally NOT scaled by
    // Weight here (it was already scaled by the caller's convention).
    Merged.push_back(x: J);
  }
  // Append any remaining entries unique to this record.
  Merged.insert(position: Merged.end(), first: I, last: IE);
  ValueData = std::move(Merged);
}
928
929void InstrProfValueSiteRecord::scale(uint64_t N, uint64_t D,
930 function_ref<void(instrprof_error)> Warn) {
931 for (InstrProfValueData &I : ValueData) {
932 bool Overflowed;
933 I.Count = SaturatingMultiply(X: I.Count, Y: N, ResultOverflowed: &Overflowed) / D;
934 if (Overflowed)
935 Warn(instrprof_error::counter_overflow);
936 }
937}
938
939// Merge Value Profile data from Src record to this record for ValueKind.
940// Scale merged value counts by \p Weight.
941void InstrProfRecord::mergeValueProfData(
942 uint32_t ValueKind, InstrProfRecord &Src, uint64_t Weight,
943 function_ref<void(instrprof_error)> Warn) {
944 uint32_t ThisNumValueSites = getNumValueSites(ValueKind);
945 uint32_t OtherNumValueSites = Src.getNumValueSites(ValueKind);
946 if (ThisNumValueSites != OtherNumValueSites) {
947 Warn(instrprof_error::value_site_count_mismatch);
948 return;
949 }
950 if (!ThisNumValueSites)
951 return;
952 std::vector<InstrProfValueSiteRecord> &ThisSiteRecords =
953 getOrCreateValueSitesForKind(ValueKind);
954 MutableArrayRef<InstrProfValueSiteRecord> OtherSiteRecords =
955 Src.getValueSitesForKind(ValueKind);
956 for (uint32_t I = 0; I < ThisNumValueSites; I++)
957 ThisSiteRecords[I].merge(Input&: OtherSiteRecords[I], Weight, Warn);
958}
959
// Merge \p Other into this record, scaling Other's counts by \p Weight.
// Edge counts are added with saturation, bitmap bytes are ORed, and value
// profile data is merged per kind. \p Warn reports recoverable problems.
void InstrProfRecord::merge(InstrProfRecord &Other, uint64_t Weight,
                            function_ref<void(instrprof_error)> Warn) {
  // If the number of counters doesn't match we either have bad data
  // or a hash collision.
  if (Counts.size() != Other.Counts.size()) {
    Warn(instrprof_error::count_mismatch);
    return;
  }

  // Special handling of the first count as the PseudoCount.
  CountPseudoKind OtherKind = Other.getCountPseudoKind();
  CountPseudoKind ThisKind = getCountPseudoKind();
  if (OtherKind != NotPseudo || ThisKind != NotPseudo) {
    // We don't allow the merge of a profile with pseudo counts and
    // a normal profile (i.e. without pseudo counts).
    // Profile supplementation should be done after the profile merge.
    if (OtherKind == NotPseudo || ThisKind == NotPseudo) {
      Warn(instrprof_error::count_mismatch);
      return;
    }
    // Both are pseudo: keep the "hotter" of the two pseudo levels.
    if (OtherKind == PseudoHot || ThisKind == PseudoHot)
      setPseudoCount(PseudoHot);
    else
      setPseudoCount(PseudoWarm);
    return;
  }

  // Merge edge counts with saturating multiply-add, clamping to the maximum
  // representable count value.
  for (size_t I = 0, E = Other.Counts.size(); I < E; ++I) {
    bool Overflowed;
    uint64_t Value =
        SaturatingMultiplyAdd(X: Other.Counts[I], Y: Weight, A: Counts[I], ResultOverflowed: &Overflowed);
    if (Value > getInstrMaxCountValue()) {
      Value = getInstrMaxCountValue();
      Overflowed = true;
    }
    Counts[I] = Value;
    if (Overflowed)
      Warn(instrprof_error::counter_overflow);
  }

  // If the number of bitmap bytes doesn't match we either have bad data
  // or a hash collision.
  if (BitmapBytes.size() != Other.BitmapBytes.size()) {
    Warn(instrprof_error::bitmap_mismatch);
    return;
  }

  // Bitmap bytes are merged by simply ORing them together.
  for (size_t I = 0, E = Other.BitmapBytes.size(); I < E; ++I) {
    BitmapBytes[I] = Other.BitmapBytes[I] | BitmapBytes[I];
  }

  for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
    mergeValueProfData(ValueKind: Kind, Src&: Other, Weight, Warn);
}
1015
1016void InstrProfRecord::scaleValueProfData(
1017 uint32_t ValueKind, uint64_t N, uint64_t D,
1018 function_ref<void(instrprof_error)> Warn) {
1019 for (auto &R : getValueSitesForKind(ValueKind))
1020 R.scale(N, D, Warn);
1021}
1022
1023void InstrProfRecord::scale(uint64_t N, uint64_t D,
1024 function_ref<void(instrprof_error)> Warn) {
1025 assert(D != 0 && "D cannot be 0");
1026 for (auto &Count : this->Counts) {
1027 bool Overflowed;
1028 Count = SaturatingMultiply(X: Count, Y: N, ResultOverflowed: &Overflowed) / D;
1029 if (Count > getInstrMaxCountValue()) {
1030 Count = getInstrMaxCountValue();
1031 Overflowed = true;
1032 }
1033 if (Overflowed)
1034 Warn(instrprof_error::counter_overflow);
1035 }
1036 for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
1037 scaleValueProfData(ValueKind: Kind, N, D, Warn);
1038}
1039
1040// Map indirect call target name hash to name string.
1041uint64_t InstrProfRecord::remapValue(uint64_t Value, uint32_t ValueKind,
1042 InstrProfSymtab *SymTab) {
1043 if (!SymTab)
1044 return Value;
1045
1046 if (ValueKind == IPVK_IndirectCallTarget)
1047 return SymTab->getFunctionHashFromAddress(Address: Value);
1048
1049 if (ValueKind == IPVK_VTableTarget)
1050 return SymTab->getVTableHashFromAddress(Address: Value);
1051
1052 return Value;
1053}
1054
1055void InstrProfRecord::addValueData(uint32_t ValueKind, uint32_t Site,
1056 ArrayRef<InstrProfValueData> VData,
1057 InstrProfSymtab *ValueMap) {
1058 // Remap values.
1059 std::vector<InstrProfValueData> RemappedVD;
1060 RemappedVD.reserve(n: VData.size());
1061 for (const auto &V : VData) {
1062 uint64_t NewValue = remapValue(Value: V.Value, ValueKind, SymTab: ValueMap);
1063 RemappedVD.push_back(x: {.Value: NewValue, .Count: V.Count});
1064 }
1065
1066 std::vector<InstrProfValueSiteRecord> &ValueSites =
1067 getOrCreateValueSitesForKind(ValueKind);
1068 assert(ValueSites.size() == Site);
1069
1070 // Add a new value site with remapped value profiling data.
1071 ValueSites.emplace_back(args: std::move(RemappedVD));
1072}
1073
// Build BalancedPartitioning function nodes from temporal profile traces.
// Each function (node) is tagged with the set of "utility nodes" derived from
// the exponentially-growing time windows in which it first appears; functions
// that co-occur in early windows share utilities and are thus grouped.
void TemporalProfTraceTy::createBPFunctionNodes(
    ArrayRef<TemporalProfTraceTy> Traces, std::vector<BPFunctionNode> &Nodes,
    bool RemoveOutlierUNs) {
  using IDT = BPFunctionNode::IDT;
  using UtilityNodeT = BPFunctionNode::UtilityNodeT;
  UtilityNodeT MaxUN = 0;
  DenseMap<IDT, size_t> IdToFirstTimestamp;
  DenseMap<IDT, UtilityNodeT> IdToFirstUN;
  DenseMap<IDT, SmallVector<UtilityNodeT>> IdToUNs;
  // TODO: We need to use the Trace.Weight field to give more weight to more
  // important utilities
  for (auto &Trace : Traces) {
    size_t CutoffTimestamp = 1;
    for (size_t Timestamp = 0; Timestamp < Trace.FunctionNameRefs.size();
         Timestamp++) {
      IDT Id = Trace.FunctionNameRefs[Timestamp];
      // Remember the earliest timestamp at which each function appears, over
      // all traces; it drives the final node ordering below.
      auto [It, WasInserted] = IdToFirstTimestamp.try_emplace(Key: Id, Args&: Timestamp);
      if (!WasInserted)
        It->getSecond() = std::min<size_t>(a: It->getSecond(), b: Timestamp);
      // Start a new utility node each time the timestamp crosses the current
      // cutoff; cutoffs double, so windows grow exponentially.
      if (Timestamp >= CutoffTimestamp) {
        ++MaxUN;
        CutoffTimestamp = 2 * Timestamp;
      }
      IdToFirstUN.try_emplace(Key: Id, Args&: MaxUN);
    }
    // A function appearing in window W of this trace gets every utility from
    // W through the trace's last window.
    for (auto &[Id, FirstUN] : IdToFirstUN)
      for (auto UN = FirstUN; UN <= MaxUN; ++UN)
        IdToUNs[Id].push_back(Elt: UN);
    // Utility node IDs are unique across traces.
    ++MaxUN;
    IdToFirstUN.clear();
  }

  if (RemoveOutlierUNs) {
    DenseMap<UtilityNodeT, unsigned> UNFrequency;
    for (auto &[Id, UNs] : IdToUNs)
      for (auto &UN : UNs)
        ++UNFrequency[UN];
    // Filter out utility nodes that are too infrequent or too prevalent to make
    // BalancedPartitioning more effective.
    for (auto &[Id, UNs] : IdToUNs)
      llvm::erase_if(C&: UNs, P: [&](auto &UN) {
        unsigned Freq = UNFrequency[UN];
        return Freq <= 1 || 2 * Freq > IdToUNs.size();
      });
  }

  for (auto &[Id, UNs] : IdToUNs)
    Nodes.emplace_back(args&: Id, args&: UNs);

  // Since BalancedPartitioning is sensitive to the initial order, we explicitly
  // order nodes by their earliest timestamp.
  llvm::sort(C&: Nodes, Comp: [&](auto &L, auto &R) {
    return std::make_pair(IdToFirstTimestamp[L.Id], L.Id) <
           std::make_pair(IdToFirstTimestamp[R.Id], R.Id);
  });
}
1130
1131#define INSTR_PROF_COMMON_API_IMPL
1132#include "llvm/ProfileData/InstrProfData.inc"
1133
1134/*!
1135 * ValueProfRecordClosure Interface implementation for InstrProfRecord
1136 * class. These C wrappers are used as adaptors so that C++ code can be
1137 * invoked as callbacks.
1138 */
1139uint32_t getNumValueKindsInstrProf(const void *Record) {
1140 return reinterpret_cast<const InstrProfRecord *>(Record)->getNumValueKinds();
1141}
1142
1143uint32_t getNumValueSitesInstrProf(const void *Record, uint32_t VKind) {
1144 return reinterpret_cast<const InstrProfRecord *>(Record)
1145 ->getNumValueSites(ValueKind: VKind);
1146}
1147
1148uint32_t getNumValueDataInstrProf(const void *Record, uint32_t VKind) {
1149 return reinterpret_cast<const InstrProfRecord *>(Record)
1150 ->getNumValueData(ValueKind: VKind);
1151}
1152
1153uint32_t getNumValueDataForSiteInstrProf(const void *R, uint32_t VK,
1154 uint32_t S) {
1155 const auto *IPR = reinterpret_cast<const InstrProfRecord *>(R);
1156 return IPR->getValueArrayForSite(ValueKind: VK, Site: S).size();
1157}
1158
1159void getValueForSiteInstrProf(const void *R, InstrProfValueData *Dst,
1160 uint32_t K, uint32_t S) {
1161 const auto *IPR = reinterpret_cast<const InstrProfRecord *>(R);
1162 llvm::copy(Range: IPR->getValueArrayForSite(ValueKind: K, Site: S), Out: Dst);
1163}
1164
// C adaptor: allocate a zero-initialized ValueProfData blob of
// \p TotalSizeInBytes. The blob is larger than sizeof(ValueProfData); the
// trailing bytes hold the variable-length records, hence the raw
// operator-new + placement-new + memset.
ValueProfData *allocValueProfDataInstrProf(size_t TotalSizeInBytes) {
  ValueProfData *VD =
      (ValueProfData *)(new (::operator new(TotalSizeInBytes)) ValueProfData());
  memset(s: VD, c: 0, n: TotalSizeInBytes);
  return VD;
}
1171
// Closure wiring the C-style ValueProfData serialization machinery to the
// InstrProfRecord adaptors above. The Record field is filled in (and the
// RemapValueData hook left unused) by the wrappers below.
static ValueProfRecordClosure InstrProfRecordClosure = {
    .Record: nullptr,
    .GetNumValueKinds: getNumValueKindsInstrProf,
    .GetNumValueSites: getNumValueSitesInstrProf,
    .GetNumValueData: getNumValueDataInstrProf,
    .GetNumValueDataForSite: getNumValueDataForSiteInstrProf,
    .RemapValueData: nullptr,
    .GetValueForSite: getValueForSiteInstrProf,
    .AllocValueProfData: allocValueProfDataInstrProf};
1181
1182// Wrapper implementation using the closure mechanism.
1183uint32_t ValueProfData::getSize(const InstrProfRecord &Record) {
1184 auto Closure = InstrProfRecordClosure;
1185 Closure.Record = &Record;
1186 return getValueProfDataSize(Closure: &Closure);
1187}
1188
// Wrapper implementation using the closure mechanism: serialize \p Record's
// value profile data into a freshly allocated ValueProfData blob.
// NOTE(review): unlike getSize above, this mutates the file-level
// InstrProfRecordClosure in place, so it does not look thread-safe — confirm
// callers never serialize concurrently.
std::unique_ptr<ValueProfData>
ValueProfData::serializeFrom(const InstrProfRecord &Record) {
  InstrProfRecordClosure.Record = &Record;

  std::unique_ptr<ValueProfData> VPD(
      serializeValueProfDataFrom(Closure: &InstrProfRecordClosure, DstData: nullptr));
  return VPD;
}
1198
// Deserialize this on-disk record's value sites into \p Record, remapping
// values through \p SymTab (may be null). ValueData entries are laid out
// contiguously; SiteCountArray[i] gives how many belong to site i.
void ValueProfRecord::deserializeTo(InstrProfRecord &Record,
                                    InstrProfSymtab *SymTab) {
  Record.reserveSites(ValueKind: Kind, NumValueSites);

  InstrProfValueData *ValueData = getValueProfRecordValueData(This: this);
  for (uint64_t VSite = 0; VSite < NumValueSites; ++VSite) {
    uint8_t ValueDataCount = this->SiteCountArray[VSite];
    // View the next ValueDataCount entries as this site's data.
    ArrayRef<InstrProfValueData> VDs(ValueData, ValueDataCount);
    Record.addValueData(ValueKind: Kind, Site: VSite, VData: VDs, ValueMap: SymTab);
    ValueData += ValueDataCount;
  }
}
1211
// For writing/serializing, Old is the host endianness, and New is
// byte order intended on disk. For Reading/deserialization, Old
// is the on-disk source endianness, and New is the host endianness.
void ValueProfRecord::swapBytes(llvm::endianness Old, llvm::endianness New) {
  using namespace support;

  if (Old == New)
    return;

  // When reading (Old is foreign), the header fields must be swapped first
  // so NumValueSites/Kind are usable by the size computations below.
  if (llvm::endianness::native != Old) {
    sys::swapByteOrder<uint32_t>(Value&: NumValueSites);
    sys::swapByteOrder<uint32_t>(Value&: Kind);
  }
  uint32_t ND = getValueProfRecordNumValueData(This: this);
  InstrProfValueData *VD = getValueProfRecordValueData(This: this);

  // No need to swap byte array: SiteCountArrray.
  for (uint32_t I = 0; I < ND; I++) {
    sys::swapByteOrder<uint64_t>(Value&: VD[I].Value);
    sys::swapByteOrder<uint64_t>(Value&: VD[I].Count);
  }
  // When writing (Old is native), the header fields are swapped last, after
  // they have been used to walk the record.
  if (llvm::endianness::native == Old) {
    sys::swapByteOrder<uint32_t>(Value&: NumValueSites);
    sys::swapByteOrder<uint32_t>(Value&: Kind);
  }
}
1238
1239void ValueProfData::deserializeTo(InstrProfRecord &Record,
1240 InstrProfSymtab *SymTab) {
1241 if (NumValueKinds == 0)
1242 return;
1243
1244 ValueProfRecord *VR = getFirstValueProfRecord(This: this);
1245 for (uint32_t K = 0; K < NumValueKinds; K++) {
1246 VR->deserializeTo(Record, SymTab);
1247 VR = getValueProfRecordNext(This: VR);
1248 }
1249}
1250
// Allocate an owned ValueProfData blob of \p TotalSize bytes (larger than
// the struct itself; trailing bytes hold the variable-length records).
static std::unique_ptr<ValueProfData> allocValueProfData(uint32_t TotalSize) {
  return std::unique_ptr<ValueProfData>(new (::operator new(TotalSize))
                                            ValueProfData());
}
1255
// Validate this (already byte-swapped) blob: kind count in range, total size
// properly aligned, every record's kind valid, and no record extending past
// TotalSize. Returns a malformed-profile error on the first violation.
Error ValueProfData::checkIntegrity() {
  if (NumValueKinds > IPVK_Last + 1)
    return make_error<InstrProfError>(
        Args: instrprof_error::malformed, Args: "number of value profile kinds is invalid");
  // Total size needs to be multiple of quadword size.
  if (TotalSize % sizeof(uint64_t))
    return make_error<InstrProfError>(
        Args: instrprof_error::malformed, Args: "total size is not multiples of quardword");

  ValueProfRecord *VR = getFirstValueProfRecord(This: this);
  for (uint32_t K = 0; K < this->NumValueKinds; K++) {
    if (VR->Kind > IPVK_Last)
      return make_error<InstrProfError>(Args: instrprof_error::malformed,
                                        Args: "value kind is invalid");
    VR = getValueProfRecordNext(This: VR);
    // The advanced pointer must still lie within the blob.
    if ((char *)VR - (char *)this > (ptrdiff_t)TotalSize)
      return make_error<InstrProfError>(
          Args: instrprof_error::malformed,
          Args: "value profile address is greater than total size");
  }
  return Error::success();
}
1278
// Read one ValueProfData blob starting at \p D (bounded by \p BufferEnd and
// encoded with \p Endianness), returning an owned, host-endian, validated
// copy — or a truncated/too_large/malformed error.
Expected<std::unique_ptr<ValueProfData>>
ValueProfData::getValueProfData(const unsigned char *D,
                                const unsigned char *const BufferEnd,
                                llvm::endianness Endianness) {
  using namespace support;

  if (D + sizeof(ValueProfData) > BufferEnd)
    return make_error<InstrProfError>(Args: instrprof_error::truncated);

  // TotalSize is the first header field; read it to bound the full copy.
  const unsigned char *Header = D;
  uint32_t TotalSize = endian::readNext<uint32_t>(memory&: Header, endian: Endianness);

  if (D + TotalSize > BufferEnd)
    return make_error<InstrProfError>(Args: instrprof_error::too_large);

  std::unique_ptr<ValueProfData> VPD = allocValueProfData(TotalSize);
  memcpy(dest: VPD.get(), src: D, n: TotalSize);
  // Byte swap.
  VPD->swapBytesToHost(Endianness);

  Error E = VPD->checkIntegrity();
  if (E)
    return std::move(E);

  return std::move(VPD);
}
1305
// Convert this blob from on-disk \p Endianness to host byte order. A no-op
// when they already match.
void ValueProfData::swapBytesToHost(llvm::endianness Endianness) {
  using namespace support;

  if (Endianness == llvm::endianness::native)
    return;

  // Header fields first: NumValueKinds must be host-endian before the record
  // walk below can trust it.
  sys::swapByteOrder<uint32_t>(Value&: TotalSize);
  sys::swapByteOrder<uint32_t>(Value&: NumValueKinds);

  ValueProfRecord *VR = getFirstValueProfRecord(This: this);
  for (uint32_t K = 0; K < NumValueKinds; K++) {
    VR->swapBytes(Old: Endianness, New: llvm::endianness::native);
    VR = getValueProfRecordNext(This: VR);
  }
}
1321
// Convert this blob from host byte order to on-disk \p Endianness. A no-op
// when they already match.
void ValueProfData::swapBytesFromHost(llvm::endianness Endianness) {
  using namespace support;

  if (Endianness == llvm::endianness::native)
    return;

  ValueProfRecord *VR = getFirstValueProfRecord(This: this);
  for (uint32_t K = 0; K < NumValueKinds; K++) {
    // Compute the next record's address while VR's header is still
    // host-endian; after swapBytes it can no longer be walked.
    ValueProfRecord *NVR = getValueProfRecordNext(This: VR);
    VR->swapBytes(Old: llvm::endianness::native, New: Endianness);
    VR = NVR;
  }
  // Header fields last, after they have been used to walk the records.
  sys::swapByteOrder<uint32_t>(Value&: TotalSize);
  sys::swapByteOrder<uint32_t>(Value&: NumValueKinds);
}
1337
1338void annotateValueSite(Module &M, Instruction &Inst,
1339 const InstrProfRecord &InstrProfR,
1340 InstrProfValueKind ValueKind, uint32_t SiteIdx,
1341 uint32_t MaxMDCount) {
1342 auto VDs = InstrProfR.getValueArrayForSite(ValueKind, Site: SiteIdx);
1343 if (VDs.empty())
1344 return;
1345 uint64_t Sum = 0;
1346 for (const InstrProfValueData &V : VDs)
1347 Sum = SaturatingAdd(X: Sum, Y: V.Count);
1348 annotateValueSite(M, Inst, VDs, Sum, ValueKind, MaxMDCount);
1349}
1350
// Attach "VP" profile metadata to \p Inst describing the given value-profile
// entries. Layout: !{"VP", Kind, TotalSum, Value0, Count0, Value1, Count1,
// ...}, with at most \p MaxMDCount value/count pairs.
void annotateValueSite(Module &M, Instruction &Inst,
                       ArrayRef<InstrProfValueData> VDs,
                       uint64_t Sum, InstrProfValueKind ValueKind,
                       uint32_t MaxMDCount) {
  if (VDs.empty())
    return;
  LLVMContext &Ctx = M.getContext();
  MDBuilder MDHelper(Ctx);
  SmallVector<Metadata *, 3> Vals;
  // Tag
  Vals.push_back(Elt: MDHelper.createString(Str: "VP"));
  // Value Kind
  Vals.push_back(Elt: MDHelper.createConstant(
      C: ConstantInt::get(Ty: Type::getInt32Ty(C&: Ctx), V: ValueKind)));
  // Total Count
  Vals.push_back(
      Elt: MDHelper.createConstant(C: ConstantInt::get(Ty: Type::getInt64Ty(C&: Ctx), V: Sum)));

  // Value Profile Data: emit up to MaxMDCount value/count pairs. Note the
  // post-decrement check means MaxMDCount == 0 wraps and emits all entries —
  // presumably callers always pass a positive cap; TODO confirm.
  uint32_t MDCount = MaxMDCount;
  for (const auto &VD : VDs) {
    Vals.push_back(Elt: MDHelper.createConstant(
        C: ConstantInt::get(Ty: Type::getInt64Ty(C&: Ctx), V: VD.Value)));
    Vals.push_back(Elt: MDHelper.createConstant(
        C: ConstantInt::get(Ty: Type::getInt64Ty(C&: Ctx), V: VD.Count)));
    if (--MDCount == 0)
      break;
  }
  Inst.setMetadata(KindID: LLVMContext::MD_prof, Node: MDNode::get(Context&: Ctx, MDs: Vals));
}
1381
1382MDNode *mayHaveValueProfileOfKind(const Instruction &Inst,
1383 InstrProfValueKind ValueKind) {
1384 MDNode *MD = Inst.getMetadata(KindID: LLVMContext::MD_prof);
1385 if (!MD)
1386 return nullptr;
1387
1388 if (MD->getNumOperands() < 5)
1389 return nullptr;
1390
1391 MDString *Tag = cast<MDString>(Val: MD->getOperand(I: 0));
1392 if (!Tag || Tag->getString() != "VP")
1393 return nullptr;
1394
1395 // Now check kind:
1396 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD: MD->getOperand(I: 1));
1397 if (!KindInt)
1398 return nullptr;
1399 if (KindInt->getZExtValue() != ValueKind)
1400 return nullptr;
1401
1402 return MD;
1403}
1404
// Extract up to \p MaxNumValueData value/count pairs of kind \p ValueKind
// from \p Inst's "VP" metadata, writing the total count into \p TotalC.
// Entries marked NOMORE_ICP_MAGICNUM are skipped unless \p GetNoICPValue.
// Returns an empty vector (and leaves TotalC possibly unset) on any
// malformed metadata.
SmallVector<InstrProfValueData, 4>
getValueProfDataFromInst(const Instruction &Inst, InstrProfValueKind ValueKind,
                         uint32_t MaxNumValueData, uint64_t &TotalC,
                         bool GetNoICPValue) {
  // Four inline elements seem to work well in practice. With MaxNumValueData,
  // this array won't grow very big anyway.
  SmallVector<InstrProfValueData, 4> ValueData;
  MDNode *MD = mayHaveValueProfileOfKind(Inst, ValueKind);
  if (!MD)
    return ValueData;
  const unsigned NOps = MD->getNumOperands();
  // Get total count
  ConstantInt *TotalCInt = mdconst::dyn_extract<ConstantInt>(MD: MD->getOperand(I: 2));
  if (!TotalCInt)
    return ValueData;
  TotalC = TotalCInt->getZExtValue();

  // Operands 3.. are (value, count) pairs.
  ValueData.reserve(N: (NOps - 3) / 2);
  for (unsigned I = 3; I < NOps; I += 2) {
    if (ValueData.size() >= MaxNumValueData)
      break;
    ConstantInt *Value = mdconst::dyn_extract<ConstantInt>(MD: MD->getOperand(I));
    ConstantInt *Count =
        mdconst::dyn_extract<ConstantInt>(MD: MD->getOperand(I: I + 1));
    // Any non-integer operand invalidates the whole node.
    if (!Value || !Count) {
      ValueData.clear();
      return ValueData;
    }
    uint64_t CntValue = Count->getZExtValue();
    // The magic count marks targets excluded from indirect-call promotion.
    if (!GetNoICPValue && (CntValue == NOMORE_ICP_MAGICNUM))
      continue;
    InstrProfValueData V;
    V.Value = Value->getZExtValue();
    V.Count = CntValue;
    ValueData.push_back(Elt: V);
  }
  return ValueData;
}
1443
1444MDNode *getPGOFuncNameMetadata(const Function &F) {
1445 return F.getMetadata(Kind: getPGOFuncNameMetadataName());
1446}
1447
1448static void createPGONameMetadata(GlobalObject &GO, StringRef MetadataName,
1449 StringRef PGOName) {
1450 // Only for internal linkage functions or global variables. The name is not
1451 // the same as PGO name for these global objects.
1452 if (GO.getName() == PGOName)
1453 return;
1454
1455 // Don't create duplicated metadata.
1456 if (GO.getMetadata(Kind: MetadataName))
1457 return;
1458
1459 LLVMContext &C = GO.getContext();
1460 MDNode *N = MDNode::get(Context&: C, MDs: MDString::get(Context&: C, Str: PGOName));
1461 GO.setMetadata(Kind: MetadataName, Node: N);
1462}
1463
1464void createPGOFuncNameMetadata(Function &F, StringRef PGOFuncName) {
1465 return createPGONameMetadata(GO&: F, MetadataName: getPGOFuncNameMetadataName(), PGOName: PGOFuncName);
1466}
1467
1468void createPGONameMetadata(GlobalObject &GO, StringRef PGOName) {
1469 return createPGONameMetadata(GO, MetadataName: getPGONameMetadataName(), PGOName);
1470}
1471
1472bool needsComdatForCounter(const GlobalObject &GO, const Module &M) {
1473 if (GO.hasComdat())
1474 return true;
1475
1476 if (!M.getTargetTriple().supportsCOMDAT())
1477 return false;
1478
1479 // See createPGOFuncNameVar for more details. To avoid link errors, profile
1480 // counters for function with available_externally linkage needs to be changed
1481 // to linkonce linkage. On ELF based systems, this leads to weak symbols to be
1482 // created. Without using comdat, duplicate entries won't be removed by the
1483 // linker leading to increased data segement size and raw profile size. Even
1484 // worse, since the referenced counter from profile per-function data object
1485 // will be resolved to the common strong definition, the profile counts for
1486 // available_externally functions will end up being duplicated in raw profile
1487 // data. This can result in distorted profile as the counts of those dups
1488 // will be accumulated by the profile merger.
1489 GlobalValue::LinkageTypes Linkage = GO.getLinkage();
1490 if (Linkage != GlobalValue::ExternalWeakLinkage &&
1491 Linkage != GlobalValue::AvailableExternallyLinkage)
1492 return false;
1493
1494 return true;
1495}
1496
1497// Check if INSTR_PROF_RAW_VERSION_VAR is defined.
1498bool isIRPGOFlagSet(const Module *M) {
1499 const GlobalVariable *IRInstrVar =
1500 M->getNamedGlobal(INSTR_PROF_QUOTE(INSTR_PROF_RAW_VERSION_VAR));
1501 if (!IRInstrVar || IRInstrVar->hasLocalLinkage())
1502 return false;
1503
1504 // For CSPGO+LTO, this variable might be marked as non-prevailing and we only
1505 // have the decl.
1506 if (IRInstrVar->isDeclaration())
1507 return true;
1508
1509 // Check if the flag is set.
1510 if (!IRInstrVar->hasInitializer())
1511 return false;
1512
1513 auto *InitVal = dyn_cast_or_null<ConstantInt>(Val: IRInstrVar->getInitializer());
1514 if (!InitVal)
1515 return false;
1516 return (InitVal->getZExtValue() & VARIANT_MASK_IR_PROF) != 0;
1517}
1518
1519// Check if we can safely rename this Comdat function.
1520bool canRenameComdatFunc(const Function &F, bool CheckAddressTaken) {
1521 if (F.getName().empty())
1522 return false;
1523 if (!needsComdatForCounter(GO: F, M: *(F.getParent())))
1524 return false;
1525 // Unsafe to rename the address-taken function (which can be used in
1526 // function comparison).
1527 if (CheckAddressTaken && F.hasAddressTaken())
1528 return false;
1529 // Only safe to do if this function may be discarded if it is not used
1530 // in the compilation unit.
1531 if (!GlobalValue::isDiscardableIfUnused(Linkage: F.getLinkage()))
1532 return false;
1533
1534 // For AvailableExternallyLinkage functions.
1535 if (!F.hasComdat()) {
1536 assert(F.getLinkage() == GlobalValue::AvailableExternallyLinkage);
1537 return true;
1538 }
1539 return true;
1540}
1541
1542// Create the variable for the profile file name.
1543void createProfileFileNameVar(Module &M, StringRef InstrProfileOutput) {
1544 if (InstrProfileOutput.empty())
1545 return;
1546 Constant *ProfileNameConst =
1547 ConstantDataArray::getString(Context&: M.getContext(), Initializer: InstrProfileOutput, AddNull: true);
1548 GlobalVariable *ProfileNameVar = new GlobalVariable(
1549 M, ProfileNameConst->getType(), true, GlobalValue::WeakAnyLinkage,
1550 ProfileNameConst, INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_NAME_VAR));
1551 ProfileNameVar->setVisibility(GlobalValue::HiddenVisibility);
1552 Triple TT(M.getTargetTriple());
1553 if (TT.supportsCOMDAT()) {
1554 ProfileNameVar->setLinkage(GlobalValue::ExternalLinkage);
1555 ProfileNameVar->setComdat(M.getOrInsertComdat(
1556 Name: StringRef(INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_NAME_VAR))));
1557 }
1558}
1559
// Load both profiles and accumulate their total counts into Base and Test
// respectively. On success, records the filenames and marks the stats valid.
Error OverlapStats::accumulateCounts(const std::string &BaseFilename,
                                     const std::string &TestFilename,
                                     bool IsCS) {
  auto GetProfileSum = [IsCS](const std::string &Filename,
                              CountSumOrPercent &Sum) -> Error {
    // This function is only used from llvm-profdata that doesn't use any kind
    // of VFS. Just create a default RealFileSystem to read profiles.
    auto FS = vfs::getRealFileSystem();
    auto ReaderOrErr = InstrProfReader::create(Path: Filename, FS&: *FS);
    if (Error E = ReaderOrErr.takeError()) {
      return E;
    }
    auto Reader = std::move(ReaderOrErr.get());
    Reader->accumulateCounts(Sum, IsCS);
    return Error::success();
  };
  auto Ret = GetProfileSum(BaseFilename, Base);
  if (Ret)
    return Ret;
  Ret = GetProfileSum(TestFilename, Test);
  if (Ret)
    return Ret;
  // Keep pointers to the caller-owned filename strings for reporting;
  // the caller must keep them alive while this object is used.
  this->BaseFilename = &BaseFilename;
  this->TestFilename = &TestFilename;
  Valid = true;
  return Error::success();
}
1587
1588void OverlapStats::addOneMismatch(const CountSumOrPercent &MismatchFunc) {
1589 Mismatch.NumEntries += 1;
1590 Mismatch.CountSum += MismatchFunc.CountSum / Test.CountSum;
1591 for (unsigned I = 0; I < IPVK_Last - IPVK_First + 1; I++) {
1592 if (Test.ValueCounts[I] >= 1.0f)
1593 Mismatch.ValueCounts[I] +=
1594 MismatchFunc.ValueCounts[I] / Test.ValueCounts[I];
1595 }
1596}
1597
1598void OverlapStats::addOneUnique(const CountSumOrPercent &UniqueFunc) {
1599 Unique.NumEntries += 1;
1600 Unique.CountSum += UniqueFunc.CountSum / Test.CountSum;
1601 for (unsigned I = 0; I < IPVK_Last - IPVK_First + 1; I++) {
1602 if (Test.ValueCounts[I] >= 1.0f)
1603 Unique.ValueCounts[I] += UniqueFunc.ValueCounts[I] / Test.ValueCounts[I];
1604 }
1605}
1606
// Print a human-readable report of the accumulated overlap statistics, at
// program level or function level depending on Level. No-op until the stats
// have been marked Valid.
void OverlapStats::dump(raw_fd_ostream &OS) const {
  if (!Valid)
    return;

  const char *EntryName =
      (Level == ProgramLevel ? "functions" : "edge counters");
  if (Level == ProgramLevel) {
    OS << "Profile overlap infomation for base_profile: " << *BaseFilename
       << " and test_profile: " << *TestFilename << "\nProgram level:\n";
  } else {
    OS << "Function level:\n"
       << "  Function: " << FuncName << " (Hash=" << FuncHash << ")\n";
  }

  // Entry counts (overlapping / mismatched / test-only).
  OS << "  # of " << EntryName << " overlap: " << Overlap.NumEntries << "\n";
  if (Mismatch.NumEntries)
    OS << "  # of " << EntryName << " mismatch: " << Mismatch.NumEntries
       << "\n";
  if (Unique.NumEntries)
    OS << "  # of " << EntryName
       << " only in test_profile: " << Unique.NumEntries << "\n";

  // Edge-count overlap summary. Scores are fractions, printed as percent.
  OS << "  Edge profile overlap: " << format(Fmt: "%.3f%%", Vals: Overlap.CountSum * 100)
     << "\n";
  if (Mismatch.NumEntries)
    OS << "  Mismatched count percentage (Edge): "
       << format(Fmt: "%.3f%%", Vals: Mismatch.CountSum * 100) << "\n";
  if (Unique.NumEntries)
    OS << "  Percentage of Edge profile only in test_profile: "
       << format(Fmt: "%.3f%%", Vals: Unique.CountSum * 100) << "\n";
  OS << "  Edge profile base count sum: " << format(Fmt: "%.0f", Vals: Base.CountSum)
     << "\n"
     << "  Edge profile test count sum: " << format(Fmt: "%.0f", Vals: Test.CountSum)
     << "\n";

  // Per-value-kind overlap, skipping kinds with no counts on either side.
  for (unsigned I = 0; I < IPVK_Last - IPVK_First + 1; I++) {
    if (Base.ValueCounts[I] < 1.0f && Test.ValueCounts[I] < 1.0f)
      continue;
    char ProfileKindName[20] = {0};
    switch (I) {
    case IPVK_IndirectCallTarget:
      strncpy(dest: ProfileKindName, src: "IndirectCall", n: 19);
      break;
    case IPVK_MemOPSize:
      strncpy(dest: ProfileKindName, src: "MemOP", n: 19);
      break;
    case IPVK_VTableTarget:
      strncpy(dest: ProfileKindName, src: "VTable", n: 19);
      break;
    default:
      snprintf(s: ProfileKindName, maxlen: 19, format: "VP[%d]", I);
      break;
    }
    OS << "  " << ProfileKindName
       << " profile overlap: " << format(Fmt: "%.3f%%", Vals: Overlap.ValueCounts[I] * 100)
       << "\n";
    if (Mismatch.NumEntries)
      OS << "  Mismatched count percentage (" << ProfileKindName
         << "): " << format(Fmt: "%.3f%%", Vals: Mismatch.ValueCounts[I] * 100) << "\n";
    if (Unique.NumEntries)
      OS << "  Percentage of " << ProfileKindName
         << " profile only in test_profile: "
         << format(Fmt: "%.3f%%", Vals: Unique.ValueCounts[I] * 100) << "\n";
    OS << "  " << ProfileKindName
       << " profile base count sum: " << format(Fmt: "%.0f", Vals: Base.ValueCounts[I])
       << "\n"
       << "  " << ProfileKindName
       << " profile test count sum: " << format(Fmt: "%.0f", Vals: Test.ValueCounts[I])
       << "\n";
  }
}
1678
1679namespace IndexedInstrProf {
1680Expected<Header> Header::readFromBuffer(const unsigned char *Buffer) {
1681 using namespace support;
1682 static_assert(std::is_standard_layout_v<Header>,
1683 "Use standard layout for Header for simplicity");
1684 Header H;
1685
1686 H.Magic = endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1687 // Check the magic number.
1688 if (H.Magic != IndexedInstrProf::Magic)
1689 return make_error<InstrProfError>(Args: instrprof_error::bad_magic);
1690
1691 // Read the version.
1692 H.Version = endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1693 if (H.getIndexedProfileVersion() >
1694 IndexedInstrProf::ProfVersion::CurrentVersion)
1695 return make_error<InstrProfError>(Args: instrprof_error::unsupported_version);
1696
1697 static_assert(IndexedInstrProf::ProfVersion::CurrentVersion == Version12,
1698 "Please update the reader as needed when a new field is added "
1699 "or when indexed profile version gets bumped.");
1700
1701 Buffer += sizeof(uint64_t); // Skip Header.Unused field.
1702 H.HashType = endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1703 H.HashOffset = endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1704 if (H.getIndexedProfileVersion() >= 8)
1705 H.MemProfOffset =
1706 endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1707 if (H.getIndexedProfileVersion() >= 9)
1708 H.BinaryIdOffset =
1709 endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1710 // Version 11 is handled by this condition.
1711 if (H.getIndexedProfileVersion() >= 10)
1712 H.TemporalProfTracesOffset =
1713 endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1714 if (H.getIndexedProfileVersion() >= 12)
1715 H.VTableNamesOffset =
1716 endian::readNext<uint64_t, llvm::endianness::little>(memory&: Buffer);
1717 return H;
1718}
1719
1720uint64_t Header::getIndexedProfileVersion() const {
1721 return GET_VERSION(Version);
1722}
1723
1724size_t Header::size() const {
1725 switch (getIndexedProfileVersion()) {
1726 // To retain backward compatibility, new fields must be appended to the end
1727 // of the header, and byte offset of existing fields shouldn't change when
1728 // indexed profile version gets incremented.
1729 static_assert(
1730 IndexedInstrProf::ProfVersion::CurrentVersion == Version12,
1731 "Please update the size computation below if a new field has "
1732 "been added to the header; for a version bump without new "
1733 "fields, add a case statement to fall through to the latest version.");
1734 case 12ull:
1735 return 72;
1736 case 11ull:
1737 [[fallthrough]];
1738 case 10ull:
1739 return 64;
1740 case 9ull:
1741 return 56;
1742 case 8ull:
1743 return 48;
1744 default: // Version7 (when the backwards compatible header was introduced).
1745 return 40;
1746 }
1747}
1748
1749} // namespace IndexedInstrProf
1750
1751} // end namespace llvm
1752

// End of llvm/lib/ProfileData/InstrProf.cpp