//===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// Hexagon target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "HexagonTargetTransformInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/LoopPeel.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

#define DEBUG_TYPE "hexagontti"

static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
    cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
    cl::init(true), cl::Hidden,
    cl::desc("Control lookup table emission on Hexagon target"));

static cl::opt<bool> HexagonMaskedVMem("hexagon-masked-vmem", cl::init(true),
    cl::Hidden, cl::desc("Enable masked loads/stores for HVX"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;

bool HexagonTTIImpl::useHVX() const {
  return ST.useHVXOps() && HexagonAutoHVX;
}

unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
    return VTy->getNumElements();
  assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
         "Expecting scalar type");
  return 1;
}

TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}
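// Note: Hexagon provides a native 64-bit popcount instruction, so a ctpop of,
// say, an i32 value is expected to become a zero-extension to 64 bits plus a
// single popcount (hence PSK_FastHardware for all widths).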

// The Hexagon target can unroll loops with run-time trip counts.
void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP,
                                             OptimizationRemarkEmitter *ORE) {
  UP.Runtime = UP.Partial = true;
}

void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
  // Only try to peel innermost loops with small runtime trip counts.
  if (L && L->isInnermost() && canPeel(L) &&
      SE.getSmallConstantTripCount(L) == 0 &&
      SE.getSmallConstantMaxTripCount(L) > 0 &&
      SE.getSmallConstantMaxTripCount(L) <= 5) {
    PP.PeelCount = 2;
  }
}

TTI::AddressingModeKind
HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
                                           ScalarEvolution *SE) const {
  return TTI::AMK_PostIndexed;
}

/// --- Vector TTI begin ---

unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
  if (Vector)
    return useHVX() ? 32 : 0;
  return 32;
}

unsigned HexagonTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return useHVX() ? 2 : 1;
}

TypeSize
HexagonTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(getMinVectorRegisterBitWidth());
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}

unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
  return useHVX() ? ST.getVectorLength() * 8 : 32;
}

ElementCount HexagonTTIImpl::getMinimumVF(unsigned ElemWidth,
                                          bool IsScalable) const {
  assert(!IsScalable && "Scalable VFs are not supported for Hexagon");
  return ElementCount::getFixed((8 * ST.getVectorLength()) / ElemWidth);
}
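// Illustrative example (the 128-byte length is an assumption; the real value
// comes from ST.getVectorLength()): with 128-byte HVX vectors and 16-bit
// elements, the minimum VF is (8 * 128) / 16 = 64 lanes.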

InstructionCost HexagonTTIImpl::getScalarizationOverhead(
    VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract) {
  return BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
}

InstructionCost
HexagonTTIImpl::getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                                 ArrayRef<Type *> Tys) {
  return BaseT::getOperandsScalarizationOverhead(Args, Tys);
}

InstructionCost HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
                                                 ArrayRef<Type *> Tys,
                                                 TTI::TargetCostKind CostKind) {
  return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
}

InstructionCost
HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  if (ICA.getID() == Intrinsic::bswap) {
    std::pair<InstructionCost, MVT> LT =
        TLI.getTypeLegalizationCost(DL, ICA.getReturnType());
    return LT.first + 2;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}
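// Rough illustration of the bswap special case: for a scalar i64 the type is
// already legal, so LT.first is typically 1, giving a cost of 1 + 2 = 3; a
// type needing two legalization steps would cost 2 + 2 = 4.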

InstructionCost HexagonTTIImpl::getAddressComputationCost(Type *Tp,
                                                          ScalarEvolution *SE,
                                                          const SCEV *S) {
  return 0;
}

InstructionCost HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                                MaybeAlign Alignment,
                                                unsigned AddressSpace,
                                                TTI::TargetCostKind CostKind,
                                                const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedSize();
    if (useHVX() && ST.isTypeForHVX(VecTy)) {
      unsigned RegWidth =
          getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize();
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads.
      const Align RegAlign(RegWidth / 8);
      if (!Alignment || *Alignment > RegAlign)
        Alignment = RegAlign;
      assert(Alignment);
      unsigned AlignWidth = 8 * Alignment->value();
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost =
        VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;

    // At this point unspecified alignment is considered as Align(1).
    const Align BoundAlignment = std::min(Alignment.valueOrOne(), Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                CostKind, I);
}
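// Worked example for the non-HVX vector path: loading <8 x float> (256 bits)
// with 4-byte alignment yields Cost = FloatFactor = 4, AlignWidth = 32,
// NumLoads = 256 / 32 = 8, for a total of 4 * 8 = 32. With 2-byte alignment
// the final branch applies: AlignWidth = 16, NumLoads = 16, LogA = 1, so the
// total is (3 - 1) * 4 * 16 = 128.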

InstructionCost
HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                      Align Alignment, unsigned AddressSpace,
                                      TTI::TargetCostKind CostKind) {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind);
}

InstructionCost HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
                                               ArrayRef<int> Mask, int Index,
                                               Type *SubTp,
                                               ArrayRef<const Value *> Args) {
  return 1;
}

InstructionCost HexagonTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
}

InstructionCost HexagonTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);
  return getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
                         CostKind);
}

InstructionCost HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                                   Type *CondTy,
                                                   CmpInst::Predicate VecPred,
                                                   TTI::TargetCostKind CostKind,
                                                   const Instruction *I) {
  if (ValTy->isVectorTy() && CostKind == TTI::TCK_RecipThroughput) {
    std::pair<InstructionCost, MVT> LT = TLI.getTypeLegalizationCost(DL, ValTy);
    if (Opcode == Instruction::FCmp)
      return LT.first + FloatFactor * getTypeNumElements(ValTy);
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}
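// For instance, an fcmp on <4 x float> is costed as LT.first + FloatFactor * 4
// (i.e. 1 + 16 = 17, assuming single-step legalization); integer compares and
// selects fall through to the base implementation.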

InstructionCost HexagonTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info, Opd1PropInfo, Opd2PropInfo,
                                         Args, CxtI);

  if (Ty->isVectorTy()) {
    std::pair<InstructionCost, MVT> LT = TLI.getTypeLegalizationCost(DL, Ty);
    if (LT.second.isFloatingPoint())
      return LT.first + FloatFactor * getTypeNumElements(Ty);
  }
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args, CxtI);
}

InstructionCost HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
                                                 Type *SrcTy,
                                                 TTI::CastContextHint CCH,
                                                 TTI::TargetCostKind CostKind,
                                                 const Instruction *I) {
  if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
    unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
    unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;

    std::pair<InstructionCost, MVT> SrcLT =
        TLI.getTypeLegalizationCost(DL, SrcTy);
    std::pair<InstructionCost, MVT> DstLT =
        TLI.getTypeLegalizationCost(DL, DstTy);
    InstructionCost Cost =
        std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
    // TODO: Allow non-throughput costs that aren't binary.
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  }
  return 1;
}
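// Example: for fptosi <4 x float> to <4 x i32>, only the source is FP, so
// SrcN = 4 and DstN = 0; assuming single-step legalization the throughput
// cost is max(1, 1) + FloatFactor * 4 = 17.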

InstructionCost HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                   unsigned Index) {
  Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
                                   : Val;
  if (Opcode == Instruction::InsertElement) {
    // Need two rotations for non-zero index.
    unsigned Cost = (Index != 0) ? 2 : 0;
    if (ElemTy->isIntegerTy(32))
      return Cost;
    // If it's not a 32-bit value, there will need to be an extract.
    return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, Index);
  }

  if (Opcode == Instruction::ExtractElement)
    return 2;

  return 1;
}
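// Under this model, inserting an i32 at lane 0 is free, inserting an i32 at
// any other lane costs 2 (the two rotations), and inserting a narrower
// element (e.g. i16) at a non-zero lane costs 2 + 2 = 4 due to the extra
// extract.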

bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/) {
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/) {
  return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
}

/// --- Vector TTI end ---

unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}

unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}

InstructionCost HexagonTTIImpl::getUserCost(const User *U,
                                            ArrayRef<const Value *> Operands,
                                            TTI::TargetCostKind CostKind) {
  auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
    if (!CI->isIntegerCast())
      return false;
    // Only extensions from an integer type shorter than 32-bit to i32
    // can be folded into the load.
    const DataLayout &DL = getDataLayout();
    unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
    unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
    if (DBW != 32 || SBW >= DBW)
      return false;

    const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
    // Technically, this code could allow multiple uses of the load, and
    // check if all the uses are the same extension operation, but this
    // should be sufficient for most cases.
    return LI && LI->hasOneUse();
  };

  if (const CastInst *CI = dyn_cast<const CastInst>(U))
    if (isCastFoldedIntoLoad(CI))
      return TargetTransformInfo::TCC_Free;
  return BaseT::getUserCost(U, Operands, CostKind);
}
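// For example, in IR such as
//   %v = load i16, i16* %p
//   %e = sext i16 %v to i32
// the sext has a single-use load operand and widens to exactly 32 bits, so it
// is reported as TCC_Free on the assumption that it folds into one of
// Hexagon's extending loads.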

bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
}