1//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7/// \file
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/AMDGPUFixupKinds.h"
11#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
12#include "Utils/AMDGPUBaseInfo.h"
13#include "llvm/BinaryFormat/ELF.h"
14#include "llvm/MC/MCAsmBackend.h"
15#include "llvm/MC/MCAssembler.h"
16#include "llvm/MC/MCContext.h"
17#include "llvm/MC/MCFixupKindInfo.h"
18#include "llvm/MC/MCObjectWriter.h"
19#include "llvm/MC/MCSubtargetInfo.h"
20#include "llvm/MC/TargetRegistry.h"
21#include "llvm/Support/EndianStream.h"
22#include "llvm/TargetParser/TargetParser.h"
23
24using namespace llvm;
25using namespace llvm::AMDGPU;
26
27namespace {
28
29class AMDGPUAsmBackend : public MCAsmBackend {
30public:
31 AMDGPUAsmBackend(const Target &T) : MCAsmBackend(llvm::endianness::little) {}
32
33 unsigned getNumFixupKinds() const override { return AMDGPU::NumTargetFixupKinds; };
34
35 void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
36 const MCValue &Target, MutableArrayRef<char> Data,
37 uint64_t Value, bool IsResolved,
38 const MCSubtargetInfo *STI) const override;
39 bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
40 const MCRelaxableFragment *DF,
41 const MCAsmLayout &Layout) const override;
42
43 void relaxInstruction(MCInst &Inst,
44 const MCSubtargetInfo &STI) const override;
45
46 bool mayNeedRelaxation(const MCInst &Inst,
47 const MCSubtargetInfo &STI) const override;
48
49 unsigned getMinimumNopSize() const override;
50 bool writeNopData(raw_ostream &OS, uint64_t Count,
51 const MCSubtargetInfo *STI) const override;
52
53 std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
54 const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
55 bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
56 const MCValue &Target,
57 const MCSubtargetInfo *STI) override;
58};
59
} // end anonymous namespace
61
62void AMDGPUAsmBackend::relaxInstruction(MCInst &Inst,
63 const MCSubtargetInfo &STI) const {
64 MCInst Res;
65 unsigned RelaxedOpcode = AMDGPU::getSOPPWithRelaxation(Opcode: Inst.getOpcode());
66 Res.setOpcode(RelaxedOpcode);
67 Res.addOperand(Op: Inst.getOperand(i: 0));
68 Inst = std::move(Res);
69}
70
71bool AMDGPUAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
72 uint64_t Value,
73 const MCRelaxableFragment *DF,
74 const MCAsmLayout &Layout) const {
75 // if the branch target has an offset of x3f this needs to be relaxed to
76 // add a s_nop 0 immediately after branch to effectively increment offset
77 // for hardware workaround in gfx1010
78 return (((int64_t(Value)/4)-1) == 0x3f);
79}
80
81bool AMDGPUAsmBackend::mayNeedRelaxation(const MCInst &Inst,
82 const MCSubtargetInfo &STI) const {
83 if (!STI.hasFeature(AMDGPU::Feature: FeatureOffset3fBug))
84 return false;
85
86 if (AMDGPU::getSOPPWithRelaxation(Opcode: Inst.getOpcode()) >= 0)
87 return true;
88
89 return false;
90}
91
92static unsigned getFixupKindNumBytes(unsigned Kind) {
93 switch (Kind) {
94 case AMDGPU::fixup_si_sopp_br:
95 return 2;
96 case FK_SecRel_1:
97 case FK_Data_1:
98 return 1;
99 case FK_SecRel_2:
100 case FK_Data_2:
101 return 2;
102 case FK_SecRel_4:
103 case FK_Data_4:
104 case FK_PCRel_4:
105 return 4;
106 case FK_SecRel_8:
107 case FK_Data_8:
108 return 8;
109 default:
110 llvm_unreachable("Unknown fixup kind!");
111 }
112}
113
114static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
115 MCContext *Ctx) {
116 int64_t SignedValue = static_cast<int64_t>(Value);
117
118 switch (Fixup.getTargetKind()) {
119 case AMDGPU::fixup_si_sopp_br: {
120 int64_t BrImm = (SignedValue - 4) / 4;
121
122 if (Ctx && !isInt<16>(x: BrImm))
123 Ctx->reportError(L: Fixup.getLoc(), Msg: "branch size exceeds simm16");
124
125 return BrImm;
126 }
127 case FK_Data_1:
128 case FK_Data_2:
129 case FK_Data_4:
130 case FK_Data_8:
131 case FK_PCRel_4:
132 case FK_SecRel_4:
133 return Value;
134 default:
135 llvm_unreachable("unhandled fixup kind");
136 }
137}
138
139void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
140 const MCValue &Target,
141 MutableArrayRef<char> Data, uint64_t Value,
142 bool IsResolved,
143 const MCSubtargetInfo *STI) const {
144 if (Fixup.getKind() >= FirstLiteralRelocationKind)
145 return;
146
147 Value = adjustFixupValue(Fixup, Value, Ctx: &Asm.getContext());
148 if (!Value)
149 return; // Doesn't change encoding.
150
151 MCFixupKindInfo Info = getFixupKindInfo(Kind: Fixup.getKind());
152
153 // Shift the value into position.
154 Value <<= Info.TargetOffset;
155
156 unsigned NumBytes = getFixupKindNumBytes(Kind: Fixup.getKind());
157 uint32_t Offset = Fixup.getOffset();
158 assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
159
160 // For each byte of the fragment that the fixup touches, mask in the bits from
161 // the fixup value.
162 for (unsigned i = 0; i != NumBytes; ++i)
163 Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
164}
165
166std::optional<MCFixupKind>
167AMDGPUAsmBackend::getFixupKind(StringRef Name) const {
168 return StringSwitch<std::optional<MCFixupKind>>(Name)
169#define ELF_RELOC(Name, Value) \
170 .Case(#Name, MCFixupKind(FirstLiteralRelocationKind + Value))
171#include "llvm/BinaryFormat/ELFRelocs/AMDGPU.def"
172#undef ELF_RELOC
173 .Default(Value: std::nullopt);
174}
175
176const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
177 MCFixupKind Kind) const {
178 const static MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
179 // name offset bits flags
180 { .Name: "fixup_si_sopp_br", .TargetOffset: 0, .TargetSize: 16, .Flags: MCFixupKindInfo::FKF_IsPCRel },
181 };
182
183 if (Kind >= FirstLiteralRelocationKind)
184 return MCAsmBackend::getFixupKindInfo(Kind: FK_NONE);
185
186 if (Kind < FirstTargetFixupKind)
187 return MCAsmBackend::getFixupKindInfo(Kind);
188
189 assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
190 "Invalid kind!");
191 return Infos[Kind - FirstTargetFixupKind];
192}
193
194bool AMDGPUAsmBackend::shouldForceRelocation(const MCAssembler &,
195 const MCFixup &Fixup,
196 const MCValue &,
197 const MCSubtargetInfo *STI) {
198 return Fixup.getKind() >= FirstLiteralRelocationKind;
199}
200
201unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
202 return 4;
203}
204
205bool AMDGPUAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
206 const MCSubtargetInfo *STI) const {
207 // If the count is not 4-byte aligned, we must be writing data into the text
208 // section (otherwise we have unaligned instructions, and thus have far
209 // bigger problems), so just write zeros instead.
210 OS.write_zeros(NumZeros: Count % 4);
211
212 // We are properly aligned, so write NOPs as requested.
213 Count /= 4;
214
215 // FIXME: R600 support.
216 // s_nop 0
217 const uint32_t Encoded_S_NOP_0 = 0xbf800000;
218
219 for (uint64_t I = 0; I != Count; ++I)
220 support::endian::write<uint32_t>(os&: OS, value: Encoded_S_NOP_0, endian: Endian);
221
222 return true;
223}
224
225//===----------------------------------------------------------------------===//
226// ELFAMDGPUAsmBackend class
227//===----------------------------------------------------------------------===//
228
229namespace {
230
231class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
232 bool Is64Bit;
233 bool HasRelocationAddend;
234 uint8_t OSABI = ELF::ELFOSABI_NONE;
235
236public:
237 ELFAMDGPUAsmBackend(const Target &T, const Triple &TT)
238 : AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
239 HasRelocationAddend(TT.getOS() == Triple::AMDHSA) {
240 switch (TT.getOS()) {
241 case Triple::AMDHSA:
242 OSABI = ELF::ELFOSABI_AMDGPU_HSA;
243 break;
244 case Triple::AMDPAL:
245 OSABI = ELF::ELFOSABI_AMDGPU_PAL;
246 break;
247 case Triple::Mesa3D:
248 OSABI = ELF::ELFOSABI_AMDGPU_MESA3D;
249 break;
250 default:
251 break;
252 }
253 }
254
255 std::unique_ptr<MCObjectTargetWriter>
256 createObjectTargetWriter() const override {
257 return createAMDGPUELFObjectWriter(Is64Bit, OSABI, HasRelocationAddend);
258 }
259};
260
261} // end anonymous namespace
262
263MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
264 const MCSubtargetInfo &STI,
265 const MCRegisterInfo &MRI,
266 const MCTargetOptions &Options) {
267 return new ELFAMDGPUAsmBackend(T, STI.getTargetTriple());
268}
269

// source code of llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp