//===- HotColdSplitting.cpp -- Outline Cold Regions -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// The goal of hot/cold splitting is to improve the memory locality of code.
/// The splitting pass does this by identifying cold blocks and moving them into
/// separate functions.
///
/// When the splitting pass finds a cold block (referred to as "the sink"), it
/// grows a maximal cold region around that block. The maximal region contains
/// all blocks (post-)dominated by the sink [*]. In theory, these blocks are as
/// cold as the sink. Once a region is found, it's split out of the original
/// function provided it's profitable to do so.
///
/// [*] In practice, there is some added complexity because some blocks are not
/// safe to extract.
///
/// TODO: Use the PM to get domtrees, and preserve BFI/BPI.
/// TODO: Reorder outlined functions.
///
//===----------------------------------------------------------------------===//
27 | |
#include "llvm/Transforms/IPO/HotColdSplitting.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/CodeExtractor.h"
#include <algorithm>
#include <cassert>
#include <limits>
#include <string>
59 | |
60 | #define DEBUG_TYPE "hotcoldsplit" |
61 | |
62 | STATISTIC(NumColdRegionsFound, "Number of cold regions found." ); |
63 | STATISTIC(NumColdRegionsOutlined, "Number of cold regions outlined." ); |
64 | |
65 | using namespace llvm; |
66 | |
67 | static cl::opt<bool> EnableStaticAnalysis("hot-cold-static-analysis" , |
68 | cl::init(Val: true), cl::Hidden); |
69 | |
70 | static cl::opt<int> |
71 | SplittingThreshold("hotcoldsplit-threshold" , cl::init(Val: 2), cl::Hidden, |
72 | cl::desc("Base penalty for splitting cold code (as a " |
73 | "multiple of TCC_Basic)" )); |
74 | |
75 | static cl::opt<bool> EnableColdSection( |
76 | "enable-cold-section" , cl::init(Val: false), cl::Hidden, |
77 | cl::desc("Enable placement of extracted cold functions" |
78 | " into a separate section after hot-cold splitting." )); |
79 | |
80 | static cl::opt<std::string> |
81 | ColdSectionName("hotcoldsplit-cold-section-name" , cl::init(Val: "__llvm_cold" ), |
82 | cl::Hidden, |
83 | cl::desc("Name for the section containing cold functions " |
84 | "extracted by hot-cold splitting." )); |
85 | |
86 | static cl::opt<int> MaxParametersForSplit( |
87 | "hotcoldsplit-max-params" , cl::init(Val: 4), cl::Hidden, |
88 | cl::desc("Maximum number of parameters for a split function" )); |
89 | |
90 | static cl::opt<int> ColdBranchProbDenom( |
91 | "hotcoldsplit-cold-probability-denom" , cl::init(Val: 100), cl::Hidden, |
92 | cl::desc("Divisor of cold branch probability." |
93 | "BranchProbability = 1/ColdBranchProbDenom" )); |
94 | |
95 | namespace { |
96 | // Same as blockEndsInUnreachable in CodeGen/BranchFolding.cpp. Do not modify |
97 | // this function unless you modify the MBB version as well. |
98 | // |
99 | /// A no successor, non-return block probably ends in unreachable and is cold. |
100 | /// Also consider a block that ends in an indirect branch to be a return block, |
101 | /// since many targets use plain indirect branches to return. |
102 | bool blockEndsInUnreachable(const BasicBlock &BB) { |
103 | if (!succ_empty(BB: &BB)) |
104 | return false; |
105 | if (BB.empty()) |
106 | return true; |
107 | const Instruction *I = BB.getTerminator(); |
108 | return !(isa<ReturnInst>(Val: I) || isa<IndirectBrInst>(Val: I)); |
109 | } |
110 | |
111 | void analyzeProfMetadata(BasicBlock *BB, |
112 | BranchProbability ColdProbThresh, |
113 | SmallPtrSetImpl<BasicBlock *> &AnnotatedColdBlocks) { |
114 | // TODO: Handle branches with > 2 successors. |
115 | BranchInst *CondBr = dyn_cast<BranchInst>(Val: BB->getTerminator()); |
116 | if (!CondBr) |
117 | return; |
118 | |
119 | uint64_t TrueWt, FalseWt; |
120 | if (!extractBranchWeights(I: *CondBr, TrueVal&: TrueWt, FalseVal&: FalseWt)) |
121 | return; |
122 | |
123 | auto SumWt = TrueWt + FalseWt; |
124 | if (SumWt == 0) |
125 | return; |
126 | |
127 | auto TrueProb = BranchProbability::getBranchProbability(Numerator: TrueWt, Denominator: SumWt); |
128 | auto FalseProb = BranchProbability::getBranchProbability(Numerator: FalseWt, Denominator: SumWt); |
129 | |
130 | if (TrueProb <= ColdProbThresh) |
131 | AnnotatedColdBlocks.insert(Ptr: CondBr->getSuccessor(i: 0)); |
132 | |
133 | if (FalseProb <= ColdProbThresh) |
134 | AnnotatedColdBlocks.insert(Ptr: CondBr->getSuccessor(i: 1)); |
135 | } |
136 | |
137 | bool unlikelyExecuted(BasicBlock &BB) { |
138 | // Exception handling blocks are unlikely executed. |
139 | if (BB.isEHPad() || isa<ResumeInst>(Val: BB.getTerminator())) |
140 | return true; |
141 | |
142 | // The block is cold if it calls/invokes a cold function. However, do not |
143 | // mark sanitizer traps as cold. |
144 | for (Instruction &I : BB) |
145 | if (auto *CB = dyn_cast<CallBase>(Val: &I)) |
146 | if (CB->hasFnAttr(Attribute::Cold) && |
147 | !CB->getMetadata(KindID: LLVMContext::MD_nosanitize)) |
148 | return true; |
149 | |
150 | // The block is cold if it has an unreachable terminator, unless it's |
151 | // preceded by a call to a (possibly warm) noreturn call (e.g. longjmp). |
152 | if (blockEndsInUnreachable(BB)) { |
153 | if (auto *CI = |
154 | dyn_cast_or_null<CallInst>(Val: BB.getTerminator()->getPrevNode())) |
155 | if (CI->hasFnAttr(Attribute::NoReturn)) |
156 | return false; |
157 | return true; |
158 | } |
159 | |
160 | return false; |
161 | } |
162 | |
163 | /// Check whether it's safe to outline \p BB. |
164 | static bool (const BasicBlock &BB) { |
165 | // EH pads are unsafe to outline because doing so breaks EH type tables. It |
166 | // follows that invoke instructions cannot be extracted, because CodeExtractor |
167 | // requires unwind destinations to be within the extraction region. |
168 | // |
169 | // Resumes that are not reachable from a cleanup landing pad are considered to |
170 | // be unreachable. It’s not safe to split them out either. |
171 | if (BB.hasAddressTaken() || BB.isEHPad()) |
172 | return false; |
173 | auto Term = BB.getTerminator(); |
174 | return !isa<InvokeInst>(Val: Term) && !isa<ResumeInst>(Val: Term); |
175 | } |
176 | |
177 | /// Mark \p F cold. Based on this assumption, also optimize it for minimum size. |
178 | /// If \p UpdateEntryCount is true (set when this is a new split function and |
179 | /// module has profile data), set entry count to 0 to ensure treated as cold. |
180 | /// Return true if the function is changed. |
181 | static bool markFunctionCold(Function &F, bool UpdateEntryCount = false) { |
182 | assert(!F.hasOptNone() && "Can't mark this cold" ); |
183 | bool Changed = false; |
184 | if (!F.hasFnAttribute(Attribute::Cold)) { |
185 | F.addFnAttr(Attribute::Cold); |
186 | Changed = true; |
187 | } |
188 | if (!F.hasFnAttribute(Attribute::MinSize)) { |
189 | F.addFnAttr(Attribute::MinSize); |
190 | Changed = true; |
191 | } |
192 | if (UpdateEntryCount) { |
193 | // Set the entry count to 0 to ensure it is placed in the unlikely text |
194 | // section when function sections are enabled. |
195 | F.setEntryCount(Count: 0); |
196 | Changed = true; |
197 | } |
198 | |
199 | return Changed; |
200 | } |
201 | |
202 | } // end anonymous namespace |
203 | |
204 | /// Check whether \p F is inherently cold. |
205 | bool HotColdSplitting::isFunctionCold(const Function &F) const { |
206 | if (F.hasFnAttribute(Attribute::Cold)) |
207 | return true; |
208 | |
209 | if (F.getCallingConv() == CallingConv::Cold) |
210 | return true; |
211 | |
212 | if (PSI->isFunctionEntryCold(F: &F)) |
213 | return true; |
214 | |
215 | return false; |
216 | } |
217 | |
218 | bool HotColdSplitting::isBasicBlockCold( |
219 | BasicBlock *BB, BranchProbability ColdProbThresh, |
220 | SmallPtrSetImpl<BasicBlock *> &AnnotatedColdBlocks, |
221 | BlockFrequencyInfo *BFI) const { |
222 | if (BFI) { |
223 | if (PSI->isColdBlock(BB, BFI)) |
224 | return true; |
225 | } else { |
226 | // Find cold blocks of successors of BB during a reverse postorder traversal. |
227 | analyzeProfMetadata(BB, ColdProbThresh, AnnotatedColdBlocks); |
228 | |
229 | // A statically cold BB would be known before it is visited |
230 | // because the prof-data of incoming edges are 'analyzed' as part of RPOT. |
231 | if (AnnotatedColdBlocks.count(Ptr: BB)) |
232 | return true; |
233 | } |
234 | |
235 | if (EnableStaticAnalysis && unlikelyExecuted(BB&: *BB)) |
236 | return true; |
237 | |
238 | return false; |
239 | } |
240 | |
241 | // Returns false if the function should not be considered for hot-cold split |
242 | // optimization. |
243 | bool HotColdSplitting::shouldOutlineFrom(const Function &F) const { |
244 | if (F.hasFnAttribute(Attribute::AlwaysInline)) |
245 | return false; |
246 | |
247 | if (F.hasFnAttribute(Attribute::NoInline)) |
248 | return false; |
249 | |
250 | // A function marked `noreturn` may contain unreachable terminators: these |
251 | // should not be considered cold, as the function may be a trampoline. |
252 | if (F.hasFnAttribute(Attribute::NoReturn)) |
253 | return false; |
254 | |
255 | if (F.hasFnAttribute(Attribute::SanitizeAddress) || |
256 | F.hasFnAttribute(Attribute::SanitizeHWAddress) || |
257 | F.hasFnAttribute(Attribute::SanitizeThread) || |
258 | F.hasFnAttribute(Attribute::SanitizeMemory)) |
259 | return false; |
260 | |
261 | return true; |
262 | } |
263 | |
264 | /// Get the benefit score of outlining \p Region. |
265 | static InstructionCost getOutliningBenefit(ArrayRef<BasicBlock *> Region, |
266 | TargetTransformInfo &TTI) { |
267 | // Sum up the code size costs of non-terminator instructions. Tight coupling |
268 | // with \ref getOutliningPenalty is needed to model the costs of terminators. |
269 | InstructionCost Benefit = 0; |
270 | for (BasicBlock *BB : Region) |
271 | for (Instruction &I : BB->instructionsWithoutDebug()) |
272 | if (&I != BB->getTerminator()) |
273 | Benefit += |
274 | TTI.getInstructionCost(U: &I, CostKind: TargetTransformInfo::TCK_CodeSize); |
275 | |
276 | return Benefit; |
277 | } |
278 | |
279 | /// Get the penalty score for outlining \p Region. |
280 | static int getOutliningPenalty(ArrayRef<BasicBlock *> Region, |
281 | unsigned NumInputs, unsigned NumOutputs) { |
282 | int Penalty = SplittingThreshold; |
283 | LLVM_DEBUG(dbgs() << "Applying penalty for splitting: " << Penalty << "\n" ); |
284 | |
285 | // If the splitting threshold is set at or below zero, skip the usual |
286 | // profitability check. |
287 | if (SplittingThreshold <= 0) |
288 | return Penalty; |
289 | |
290 | // Find the number of distinct exit blocks for the region. Use a conservative |
291 | // check to determine whether control returns from the region. |
292 | bool NoBlocksReturn = true; |
293 | SmallPtrSet<BasicBlock *, 2> SuccsOutsideRegion; |
294 | for (BasicBlock *BB : Region) { |
295 | // If a block has no successors, only assume it does not return if it's |
296 | // unreachable. |
297 | if (succ_empty(BB)) { |
298 | NoBlocksReturn &= isa<UnreachableInst>(Val: BB->getTerminator()); |
299 | continue; |
300 | } |
301 | |
302 | for (BasicBlock *SuccBB : successors(BB)) { |
303 | if (!is_contained(Range&: Region, Element: SuccBB)) { |
304 | NoBlocksReturn = false; |
305 | SuccsOutsideRegion.insert(Ptr: SuccBB); |
306 | } |
307 | } |
308 | } |
309 | |
310 | // Count the number of phis in exit blocks with >= 2 incoming values from the |
311 | // outlining region. These phis are split (\ref severSplitPHINodesOfExits), |
312 | // and new outputs are created to supply the split phis. CodeExtractor can't |
313 | // report these new outputs until extraction begins, but it's important to |
314 | // factor the cost of the outputs into the cost calculation. |
315 | unsigned NumSplitExitPhis = 0; |
316 | for (BasicBlock *ExitBB : SuccsOutsideRegion) { |
317 | for (PHINode &PN : ExitBB->phis()) { |
318 | // Find all incoming values from the outlining region. |
319 | int NumIncomingVals = 0; |
320 | for (unsigned i = 0; i < PN.getNumIncomingValues(); ++i) |
321 | if (llvm::is_contained(Range&: Region, Element: PN.getIncomingBlock(i))) { |
322 | ++NumIncomingVals; |
323 | if (NumIncomingVals > 1) { |
324 | ++NumSplitExitPhis; |
325 | break; |
326 | } |
327 | } |
328 | } |
329 | } |
330 | |
331 | // Apply a penalty for calling the split function. Factor in the cost of |
332 | // materializing all of the parameters. |
333 | int NumOutputsAndSplitPhis = NumOutputs + NumSplitExitPhis; |
334 | int NumParams = NumInputs + NumOutputsAndSplitPhis; |
335 | if (NumParams > MaxParametersForSplit) { |
336 | LLVM_DEBUG(dbgs() << NumInputs << " inputs and " << NumOutputsAndSplitPhis |
337 | << " outputs exceeds parameter limit (" |
338 | << MaxParametersForSplit << ")\n" ); |
339 | return std::numeric_limits<int>::max(); |
340 | } |
341 | const int CostForArgMaterialization = 2 * TargetTransformInfo::TCC_Basic; |
342 | LLVM_DEBUG(dbgs() << "Applying penalty for: " << NumParams << " params\n" ); |
343 | Penalty += CostForArgMaterialization * NumParams; |
344 | |
345 | // Apply the typical code size cost for an output alloca and its associated |
346 | // reload in the caller. Also penalize the associated store in the callee. |
347 | LLVM_DEBUG(dbgs() << "Applying penalty for: " << NumOutputsAndSplitPhis |
348 | << " outputs/split phis\n" ); |
349 | const int CostForRegionOutput = 3 * TargetTransformInfo::TCC_Basic; |
350 | Penalty += CostForRegionOutput * NumOutputsAndSplitPhis; |
351 | |
352 | // Apply a `noreturn` bonus. |
353 | if (NoBlocksReturn) { |
354 | LLVM_DEBUG(dbgs() << "Applying bonus for: " << Region.size() |
355 | << " non-returning terminators\n" ); |
356 | Penalty -= Region.size(); |
357 | } |
358 | |
359 | // Apply a penalty for having more than one successor outside of the region. |
360 | // This penalty accounts for the switch needed in the caller. |
361 | if (SuccsOutsideRegion.size() > 1) { |
362 | LLVM_DEBUG(dbgs() << "Applying penalty for: " << SuccsOutsideRegion.size() |
363 | << " non-region successors\n" ); |
364 | Penalty += (SuccsOutsideRegion.size() - 1) * TargetTransformInfo::TCC_Basic; |
365 | } |
366 | |
367 | return Penalty; |
368 | } |
369 | |
370 | // Determine if it is beneficial to split the \p Region. |
371 | bool HotColdSplitting::(CodeExtractor &CE, |
372 | const BlockSequence &Region, |
373 | TargetTransformInfo &TTI) { |
374 | assert(!Region.empty()); |
375 | |
376 | // Perform a simple cost/benefit analysis to decide whether or not to permit |
377 | // splitting. |
378 | SetVector<Value *> Inputs, Outputs, Sinks; |
379 | CE.findInputsOutputs(Inputs, Outputs, Allocas: Sinks); |
380 | InstructionCost OutliningBenefit = getOutliningBenefit(Region, TTI); |
381 | int OutliningPenalty = |
382 | getOutliningPenalty(Region, NumInputs: Inputs.size(), NumOutputs: Outputs.size()); |
383 | LLVM_DEBUG(dbgs() << "Split profitability: benefit = " << OutliningBenefit |
384 | << ", penalty = " << OutliningPenalty << "\n" ); |
385 | if (!OutliningBenefit.isValid() || OutliningBenefit <= OutliningPenalty) |
386 | return false; |
387 | |
388 | return true; |
389 | } |
390 | |
391 | // Split the single \p EntryPoint cold region. \p CE is the region code |
392 | // extractor. |
393 | Function *HotColdSplitting::( |
394 | BasicBlock &EntryPoint, CodeExtractor &CE, |
395 | const CodeExtractorAnalysisCache &CEAC, BlockFrequencyInfo *BFI, |
396 | TargetTransformInfo &TTI, OptimizationRemarkEmitter &ORE) { |
397 | Function *OrigF = EntryPoint.getParent(); |
398 | if (Function *OutF = CE.extractCodeRegion(CEAC)) { |
399 | User *U = *OutF->user_begin(); |
400 | CallInst *CI = cast<CallInst>(Val: U); |
401 | NumColdRegionsOutlined++; |
402 | if (TTI.useColdCCForColdCall(F&: *OutF)) { |
403 | OutF->setCallingConv(CallingConv::Cold); |
404 | CI->setCallingConv(CallingConv::Cold); |
405 | } |
406 | CI->setIsNoInline(); |
407 | |
408 | if (EnableColdSection) |
409 | OutF->setSection(ColdSectionName); |
410 | else { |
411 | if (OrigF->hasSection()) |
412 | OutF->setSection(OrigF->getSection()); |
413 | } |
414 | |
415 | markFunctionCold(F&: *OutF, UpdateEntryCount: BFI != nullptr); |
416 | |
417 | LLVM_DEBUG(llvm::dbgs() << "Outlined Region: " << *OutF); |
418 | ORE.emit(RemarkBuilder: [&]() { |
419 | return OptimizationRemark(DEBUG_TYPE, "HotColdSplit" , |
420 | &*EntryPoint.begin()) |
421 | << ore::NV("Original" , OrigF) << " split cold code into " |
422 | << ore::NV("Split" , OutF); |
423 | }); |
424 | return OutF; |
425 | } |
426 | |
427 | ORE.emit(RemarkBuilder: [&]() { |
428 | return OptimizationRemarkMissed(DEBUG_TYPE, "ExtractFailed" , |
429 | &*EntryPoint.begin()) |
430 | << "Failed to extract region at block " |
431 | << ore::NV("Block" , &EntryPoint); |
432 | }); |
433 | return nullptr; |
434 | } |
435 | |
436 | /// A pair of (basic block, score). |
437 | using BlockTy = std::pair<BasicBlock *, unsigned>; |
438 | |
439 | namespace { |
440 | /// A maximal outlining region. This contains all blocks post-dominated by a |
441 | /// sink block, the sink block itself, and all blocks dominated by the sink. |
442 | /// If sink-predecessors and sink-successors cannot be extracted in one region, |
443 | /// the static constructor returns a list of suitable extraction regions. |
444 | class OutliningRegion { |
445 | /// A list of (block, score) pairs. A block's score is non-zero iff it's a |
446 | /// viable sub-region entry point. Blocks with higher scores are better entry |
447 | /// points (i.e. they are more distant ancestors of the sink block). |
448 | SmallVector<BlockTy, 0> Blocks = {}; |
449 | |
450 | /// The suggested entry point into the region. If the region has multiple |
451 | /// entry points, all blocks within the region may not be reachable from this |
452 | /// entry point. |
453 | BasicBlock *SuggestedEntryPoint = nullptr; |
454 | |
455 | /// Whether the entire function is cold. |
456 | bool EntireFunctionCold = false; |
457 | |
458 | /// If \p BB is a viable entry point, return \p Score. Return 0 otherwise. |
459 | static unsigned getEntryPointScore(BasicBlock &BB, unsigned Score) { |
460 | return mayExtractBlock(BB) ? Score : 0; |
461 | } |
462 | |
463 | /// These scores should be lower than the score for predecessor blocks, |
464 | /// because regions starting at predecessor blocks are typically larger. |
465 | static constexpr unsigned ScoreForSuccBlock = 1; |
466 | static constexpr unsigned ScoreForSinkBlock = 1; |
467 | |
468 | OutliningRegion(const OutliningRegion &) = delete; |
469 | OutliningRegion &operator=(const OutliningRegion &) = delete; |
470 | |
471 | public: |
472 | OutliningRegion() = default; |
473 | OutliningRegion(OutliningRegion &&) = default; |
474 | OutliningRegion &operator=(OutliningRegion &&) = default; |
475 | |
476 | static std::vector<OutliningRegion> create(BasicBlock &SinkBB, |
477 | const DominatorTree &DT, |
478 | const PostDominatorTree &PDT) { |
479 | std::vector<OutliningRegion> Regions; |
480 | SmallPtrSet<BasicBlock *, 4> RegionBlocks; |
481 | |
482 | Regions.emplace_back(); |
483 | OutliningRegion *ColdRegion = &Regions.back(); |
484 | |
485 | auto addBlockToRegion = [&](BasicBlock *BB, unsigned Score) { |
486 | RegionBlocks.insert(Ptr: BB); |
487 | ColdRegion->Blocks.emplace_back(Args&: BB, Args&: Score); |
488 | }; |
489 | |
490 | // The ancestor farthest-away from SinkBB, and also post-dominated by it. |
491 | unsigned SinkScore = getEntryPointScore(BB&: SinkBB, Score: ScoreForSinkBlock); |
492 | ColdRegion->SuggestedEntryPoint = (SinkScore > 0) ? &SinkBB : nullptr; |
493 | unsigned BestScore = SinkScore; |
494 | |
495 | // Visit SinkBB's ancestors using inverse DFS. |
496 | auto PredIt = ++idf_begin(G: &SinkBB); |
497 | auto PredEnd = idf_end(G: &SinkBB); |
498 | while (PredIt != PredEnd) { |
499 | BasicBlock &PredBB = **PredIt; |
500 | bool SinkPostDom = PDT.dominates(A: &SinkBB, B: &PredBB); |
501 | |
502 | // If the predecessor is cold and has no predecessors, the entire |
503 | // function must be cold. |
504 | if (SinkPostDom && pred_empty(BB: &PredBB)) { |
505 | ColdRegion->EntireFunctionCold = true; |
506 | return Regions; |
507 | } |
508 | |
509 | // If SinkBB does not post-dominate a predecessor, do not mark the |
510 | // predecessor (or any of its predecessors) cold. |
511 | if (!SinkPostDom || !mayExtractBlock(BB: PredBB)) { |
512 | PredIt.skipChildren(); |
513 | continue; |
514 | } |
515 | |
516 | // Keep track of the post-dominated ancestor farthest away from the sink. |
517 | // The path length is always >= 2, ensuring that predecessor blocks are |
518 | // considered as entry points before the sink block. |
519 | unsigned PredScore = getEntryPointScore(BB&: PredBB, Score: PredIt.getPathLength()); |
520 | if (PredScore > BestScore) { |
521 | ColdRegion->SuggestedEntryPoint = &PredBB; |
522 | BestScore = PredScore; |
523 | } |
524 | |
525 | addBlockToRegion(&PredBB, PredScore); |
526 | ++PredIt; |
527 | } |
528 | |
529 | // If the sink can be added to the cold region, do so. It's considered as |
530 | // an entry point before any sink-successor blocks. |
531 | // |
532 | // Otherwise, split cold sink-successor blocks using a separate region. |
533 | // This satisfies the requirement that all extraction blocks other than the |
534 | // first have predecessors within the extraction region. |
535 | if (mayExtractBlock(BB: SinkBB)) { |
536 | addBlockToRegion(&SinkBB, SinkScore); |
537 | if (pred_empty(BB: &SinkBB)) { |
538 | ColdRegion->EntireFunctionCold = true; |
539 | return Regions; |
540 | } |
541 | } else { |
542 | Regions.emplace_back(); |
543 | ColdRegion = &Regions.back(); |
544 | BestScore = 0; |
545 | } |
546 | |
547 | // Find all successors of SinkBB dominated by SinkBB using DFS. |
548 | auto SuccIt = ++df_begin(G: &SinkBB); |
549 | auto SuccEnd = df_end(G: &SinkBB); |
550 | while (SuccIt != SuccEnd) { |
551 | BasicBlock &SuccBB = **SuccIt; |
552 | bool SinkDom = DT.dominates(A: &SinkBB, B: &SuccBB); |
553 | |
554 | // Don't allow the backwards & forwards DFSes to mark the same block. |
555 | bool DuplicateBlock = RegionBlocks.count(Ptr: &SuccBB); |
556 | |
557 | // If SinkBB does not dominate a successor, do not mark the successor (or |
558 | // any of its successors) cold. |
559 | if (DuplicateBlock || !SinkDom || !mayExtractBlock(BB: SuccBB)) { |
560 | SuccIt.skipChildren(); |
561 | continue; |
562 | } |
563 | |
564 | unsigned SuccScore = getEntryPointScore(BB&: SuccBB, Score: ScoreForSuccBlock); |
565 | if (SuccScore > BestScore) { |
566 | ColdRegion->SuggestedEntryPoint = &SuccBB; |
567 | BestScore = SuccScore; |
568 | } |
569 | |
570 | addBlockToRegion(&SuccBB, SuccScore); |
571 | ++SuccIt; |
572 | } |
573 | |
574 | return Regions; |
575 | } |
576 | |
577 | /// Whether this region has nothing to extract. |
578 | bool empty() const { return !SuggestedEntryPoint; } |
579 | |
580 | /// The blocks in this region. |
581 | ArrayRef<std::pair<BasicBlock *, unsigned>> blocks() const { return Blocks; } |
582 | |
583 | /// Whether the entire function containing this region is cold. |
584 | bool isEntireFunctionCold() const { return EntireFunctionCold; } |
585 | |
586 | /// Remove a sub-region from this region and return it as a block sequence. |
587 | BlockSequence takeSingleEntrySubRegion(DominatorTree &DT) { |
588 | assert(!empty() && !isEntireFunctionCold() && "Nothing to extract" ); |
589 | |
590 | // Remove blocks dominated by the suggested entry point from this region. |
591 | // During the removal, identify the next best entry point into the region. |
592 | // Ensure that the first extracted block is the suggested entry point. |
593 | BlockSequence SubRegion = {SuggestedEntryPoint}; |
594 | BasicBlock *NextEntryPoint = nullptr; |
595 | unsigned NextScore = 0; |
596 | auto RegionEndIt = Blocks.end(); |
597 | auto RegionStartIt = remove_if(Range&: Blocks, P: [&](const BlockTy &Block) { |
598 | BasicBlock *BB = Block.first; |
599 | unsigned Score = Block.second; |
600 | bool InSubRegion = |
601 | BB == SuggestedEntryPoint || DT.dominates(A: SuggestedEntryPoint, B: BB); |
602 | if (!InSubRegion && Score > NextScore) { |
603 | NextEntryPoint = BB; |
604 | NextScore = Score; |
605 | } |
606 | if (InSubRegion && BB != SuggestedEntryPoint) |
607 | SubRegion.push_back(Elt: BB); |
608 | return InSubRegion; |
609 | }); |
610 | Blocks.erase(CS: RegionStartIt, CE: RegionEndIt); |
611 | |
612 | // Update the suggested entry point. |
613 | SuggestedEntryPoint = NextEntryPoint; |
614 | |
615 | return SubRegion; |
616 | } |
617 | }; |
618 | } // namespace |
619 | |
620 | bool HotColdSplitting::outlineColdRegions(Function &F, bool HasProfileSummary) { |
621 | // The set of cold blocks outlined. |
622 | SmallPtrSet<BasicBlock *, 4> ColdBlocks; |
623 | |
624 | // The set of cold blocks cannot be outlined. |
625 | SmallPtrSet<BasicBlock *, 4> CannotBeOutlinedColdBlocks; |
626 | |
627 | // Set of cold blocks obtained with RPOT. |
628 | SmallPtrSet<BasicBlock *, 4> AnnotatedColdBlocks; |
629 | |
630 | // The worklist of non-intersecting regions left to outline. The first member |
631 | // of the pair is the entry point into the region to be outlined. |
632 | SmallVector<std::pair<BasicBlock *, CodeExtractor>, 2> OutliningWorklist; |
633 | |
634 | // Set up an RPO traversal. Experimentally, this performs better (outlines |
635 | // more) than a PO traversal, because we prevent region overlap by keeping |
636 | // the first region to contain a block. |
637 | ReversePostOrderTraversal<Function *> RPOT(&F); |
638 | |
639 | // Calculate domtrees lazily. This reduces compile-time significantly. |
640 | std::unique_ptr<DominatorTree> DT; |
641 | std::unique_ptr<PostDominatorTree> PDT; |
642 | |
643 | // Calculate BFI lazily (it's only used to query ProfileSummaryInfo). This |
644 | // reduces compile-time significantly. TODO: When we *do* use BFI, we should |
645 | // be able to salvage its domtrees instead of recomputing them. |
646 | BlockFrequencyInfo *BFI = nullptr; |
647 | if (HasProfileSummary) |
648 | BFI = GetBFI(F); |
649 | |
650 | TargetTransformInfo &TTI = GetTTI(F); |
651 | OptimizationRemarkEmitter &ORE = (*GetORE)(F); |
652 | AssumptionCache *AC = LookupAC(F); |
653 | auto ColdProbThresh = TTI.getPredictableBranchThreshold().getCompl(); |
654 | |
655 | if (ColdBranchProbDenom.getNumOccurrences()) |
656 | ColdProbThresh = BranchProbability(1, ColdBranchProbDenom.getValue()); |
657 | |
658 | unsigned OutlinedFunctionID = 1; |
659 | // Find all cold regions. |
660 | for (BasicBlock *BB : RPOT) { |
661 | // This block is already part of some outlining region. |
662 | if (ColdBlocks.count(Ptr: BB)) |
663 | continue; |
664 | |
665 | // This block is already part of some region cannot be outlined. |
666 | if (CannotBeOutlinedColdBlocks.count(Ptr: BB)) |
667 | continue; |
668 | |
669 | if (!isBasicBlockCold(BB, ColdProbThresh, AnnotatedColdBlocks, BFI)) |
670 | continue; |
671 | |
672 | LLVM_DEBUG({ |
673 | dbgs() << "Found a cold block:\n" ; |
674 | BB->dump(); |
675 | }); |
676 | |
677 | if (!DT) |
678 | DT = std::make_unique<DominatorTree>(args&: F); |
679 | if (!PDT) |
680 | PDT = std::make_unique<PostDominatorTree>(args&: F); |
681 | |
682 | auto Regions = OutliningRegion::create(SinkBB&: *BB, DT: *DT, PDT: *PDT); |
683 | for (OutliningRegion &Region : Regions) { |
684 | if (Region.empty()) |
685 | continue; |
686 | |
687 | if (Region.isEntireFunctionCold()) { |
688 | LLVM_DEBUG(dbgs() << "Entire function is cold\n" ); |
689 | return markFunctionCold(F); |
690 | } |
691 | |
692 | do { |
693 | BlockSequence SubRegion = Region.takeSingleEntrySubRegion(DT&: *DT); |
694 | LLVM_DEBUG({ |
695 | dbgs() << "Hot/cold splitting attempting to outline these blocks:\n" ; |
696 | for (BasicBlock *BB : SubRegion) |
697 | BB->dump(); |
698 | }); |
699 | |
700 | // TODO: Pass BFI and BPI to update profile information. |
701 | CodeExtractor CE( |
702 | SubRegion, &*DT, /* AggregateArgs */ false, /* BFI */ nullptr, |
703 | /* BPI */ nullptr, AC, /* AllowVarArgs */ false, |
704 | /* AllowAlloca */ false, /* AllocaBlock */ nullptr, |
705 | /* Suffix */ "cold." + std::to_string(val: OutlinedFunctionID)); |
706 | |
707 | if (CE.isEligible() && isSplittingBeneficial(CE, Region: SubRegion, TTI) && |
708 | // If this outlining region intersects with another, drop the new |
709 | // region. |
710 | // |
711 | // TODO: It's theoretically possible to outline more by only keeping |
712 | // the largest region which contains a block, but the extra |
713 | // bookkeeping to do this is tricky/expensive. |
714 | none_of(Range&: SubRegion, P: [&](BasicBlock *Block) { |
715 | return ColdBlocks.contains(Ptr: Block); |
716 | })) { |
717 | ColdBlocks.insert(I: SubRegion.begin(), E: SubRegion.end()); |
718 | |
719 | LLVM_DEBUG({ |
720 | for (auto *Block : SubRegion) |
721 | dbgs() << " contains cold block:" << Block->getName() << "\n" ; |
722 | }); |
723 | |
724 | OutliningWorklist.emplace_back( |
725 | Args: std::make_pair(x&: SubRegion[0], y: std::move(CE))); |
726 | ++OutlinedFunctionID; |
727 | } else { |
728 | // The cold block region cannot be outlined. |
729 | for (auto *Block : SubRegion) |
730 | if ((DT->dominates(A: BB, B: Block) && PDT->dominates(A: Block, B: BB)) || |
731 | (PDT->dominates(A: BB, B: Block) && DT->dominates(A: Block, B: BB))) |
732 | // Will skip this cold block in the loop to save the compile time |
733 | CannotBeOutlinedColdBlocks.insert(Ptr: Block); |
734 | } |
735 | } while (!Region.empty()); |
736 | |
737 | ++NumColdRegionsFound; |
738 | } |
739 | } |
740 | |
741 | if (OutliningWorklist.empty()) |
742 | return false; |
743 | |
744 | // Outline single-entry cold regions, splitting up larger regions as needed. |
745 | // Cache and recycle the CodeExtractor analysis to avoid O(n^2) compile-time. |
746 | CodeExtractorAnalysisCache CEAC(F); |
747 | for (auto &BCE : OutliningWorklist) { |
748 | Function *Outlined = |
749 | extractColdRegion(EntryPoint&: *BCE.first, CE&: BCE.second, CEAC, BFI, TTI, ORE); |
750 | assert(Outlined && "Should be outlined" ); |
751 | (void)Outlined; |
752 | } |
753 | |
754 | return true; |
755 | } |
756 | |
757 | bool HotColdSplitting::run(Module &M) { |
758 | bool Changed = false; |
759 | bool HasProfileSummary = (M.getProfileSummary(/* IsCS */ false) != nullptr); |
760 | for (Function &F : M) { |
761 | // Do not touch declarations. |
762 | if (F.isDeclaration()) |
763 | continue; |
764 | |
765 | // Do not modify `optnone` functions. |
766 | if (F.hasOptNone()) |
767 | continue; |
768 | |
769 | // Detect inherently cold functions and mark them as such. |
770 | if (isFunctionCold(F)) { |
771 | Changed |= markFunctionCold(F); |
772 | continue; |
773 | } |
774 | |
775 | if (!shouldOutlineFrom(F)) { |
776 | LLVM_DEBUG(llvm::dbgs() << "Skipping " << F.getName() << "\n" ); |
777 | continue; |
778 | } |
779 | |
780 | LLVM_DEBUG(llvm::dbgs() << "Outlining in " << F.getName() << "\n" ); |
781 | Changed |= outlineColdRegions(F, HasProfileSummary); |
782 | } |
783 | return Changed; |
784 | } |
785 | |
786 | PreservedAnalyses |
787 | HotColdSplittingPass::run(Module &M, ModuleAnalysisManager &AM) { |
788 | auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(IR&: M).getManager(); |
789 | |
790 | auto LookupAC = [&FAM](Function &F) -> AssumptionCache * { |
791 | return FAM.getCachedResult<AssumptionAnalysis>(IR&: F); |
792 | }; |
793 | |
794 | auto GBFI = [&FAM](Function &F) { |
795 | return &FAM.getResult<BlockFrequencyAnalysis>(IR&: F); |
796 | }; |
797 | |
798 | std::function<TargetTransformInfo &(Function &)> GTTI = |
799 | [&FAM](Function &F) -> TargetTransformInfo & { |
800 | return FAM.getResult<TargetIRAnalysis>(IR&: F); |
801 | }; |
802 | |
803 | std::unique_ptr<OptimizationRemarkEmitter> ORE; |
804 | std::function<OptimizationRemarkEmitter &(Function &)> GetORE = |
805 | [&ORE](Function &F) -> OptimizationRemarkEmitter & { |
806 | ORE.reset(p: new OptimizationRemarkEmitter(&F)); |
807 | return *ORE; |
808 | }; |
809 | |
810 | ProfileSummaryInfo *PSI = &AM.getResult<ProfileSummaryAnalysis>(IR&: M); |
811 | |
812 | if (HotColdSplitting(PSI, GBFI, GTTI, &GetORE, LookupAC).run(M)) |
813 | return PreservedAnalyses::none(); |
814 | return PreservedAnalyses::all(); |
815 | } |
816 | |