1//===-------- RISCV.cpp - Emit LLVM Code for builtins ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CodeGenFunction.h"
14#include "clang/Basic/TargetBuiltins.h"
15#include "llvm/IR/IntrinsicsRISCV.h"
16#include "llvm/TargetParser/RISCVISAInfo.h"
17#include "llvm/TargetParser/RISCVTargetParser.h"
18
19using namespace clang;
20using namespace CodeGen;
21using namespace llvm;
22
// Policy bits passed through `PolicyAttrs` to the helpers below.
// The 0th bit simulates the `vta` (tail-agnostic) of RVV.
// The 1st bit simulates the `vma` (mask-agnostic) of RVV.
static constexpr unsigned RVV_VTA = 0x1;
static constexpr unsigned RVV_VMA = 0x2;
27
28// RISC-V Vector builtin helper functions are marked NOINLINE to prevent
29// excessive inlining in CodeGenFunction::EmitRISCVBuiltinExpr's large switch
30// statement, which would significantly increase compilation time.
31static LLVM_ATTRIBUTE_NOINLINE Value *
32emitRVVVLEFFBuiltin(CodeGenFunction *CGF, const CallExpr *E,
33 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
34 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
35 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
36 auto &Builder = CGF->Builder;
37 auto &CGM = CGF->CGM;
38 llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
39 if (IsMasked) {
40 // Move mask to right before vl.
41 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 1);
42 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
43 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
44 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
45 IntrinsicTypes = {ResultType, Ops[4]->getType(), Ops[2]->getType()};
46 } else {
47 if (PolicyAttrs & RVV_VTA)
48 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
49 IntrinsicTypes = {ResultType, Ops[3]->getType(), Ops[1]->getType()};
50 }
51 Value *NewVL = Ops[2];
52 Ops.erase(CI: Ops.begin() + 2);
53 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
54 llvm::Value *LoadValue = Builder.CreateCall(Callee: F, Args: Ops, Name: "");
55 llvm::Value *V = Builder.CreateExtractValue(Agg: LoadValue, Idxs: {0});
56 // Store new_vl.
57 clang::CharUnits Align;
58 if (IsMasked)
59 Align = CGM.getNaturalPointeeTypeAlignment(
60 T: E->getArg(Arg: E->getNumArgs() - 2)->getType());
61 else
62 Align = CGM.getNaturalPointeeTypeAlignment(T: E->getArg(Arg: 1)->getType());
63 llvm::Value *Val = Builder.CreateExtractValue(Agg: LoadValue, Idxs: {1});
64 Builder.CreateStore(Val, Addr: Address(NewVL, Val->getType(), Align));
65 return V;
66}
67
68static LLVM_ATTRIBUTE_NOINLINE Value *
69emitRVVVSSEBuiltin(CodeGenFunction *CGF, const CallExpr *E,
70 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
71 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
72 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
73 auto &Builder = CGF->Builder;
74 auto &CGM = CGF->CGM;
75 llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
76 if (IsMasked) {
77 // Builtin: (mask, ptr, stride, value, vl). Intrinsic: (value, ptr, stride,
78 // mask, vl)
79 std::swap(a&: Ops[0], b&: Ops[3]);
80 } else {
81 // Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl)
82 std::rotate(first: Ops.begin(), middle: Ops.begin() + 2, last: Ops.begin() + 3);
83 }
84 if (IsMasked)
85 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[4]->getType()};
86 else
87 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[3]->getType()};
88 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
89 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
90}
91
92static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVIndexedStoreBuiltin(
93 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
94 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
95 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
96 auto &Builder = CGF->Builder;
97 auto &CGM = CGF->CGM;
98 llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
99 if (IsMasked) {
100 // Builtin: (mask, ptr, index, value, vl).
101 // Intrinsic: (value, ptr, index, mask, vl)
102 std::swap(a&: Ops[0], b&: Ops[3]);
103 } else {
104 // Builtin: (ptr, index, value, vl).
105 // Intrinsic: (value, ptr, index, vl)
106 std::rotate(first: Ops.begin(), middle: Ops.begin() + 2, last: Ops.begin() + 3);
107 }
108 if (IsMasked)
109 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
110 Ops[4]->getType()};
111 else
112 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
113 Ops[3]->getType()};
114 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
115 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
116}
117
118static LLVM_ATTRIBUTE_NOINLINE Value *
119emitRVVPseudoUnaryBuiltin(CodeGenFunction *CGF, const CallExpr *E,
120 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
121 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
122 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
123 auto &Builder = CGF->Builder;
124 auto &CGM = CGF->CGM;
125 llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
126 if (IsMasked) {
127 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 1);
128 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
129 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
130 } else {
131 if (PolicyAttrs & RVV_VTA)
132 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
133 }
134 auto ElemTy = cast<llvm::VectorType>(Val: ResultType)->getElementType();
135 Ops.insert(I: Ops.begin() + 2, Elt: llvm::Constant::getNullValue(Ty: ElemTy));
136 if (IsMasked) {
137 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
138 // maskedoff, op1, op2, mask, vl, policy
139 IntrinsicTypes = {ResultType, ElemTy, Ops[4]->getType()};
140 } else {
141 // passthru, op1, op2, vl
142 IntrinsicTypes = {ResultType, ElemTy, Ops[3]->getType()};
143 }
144 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
145 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
146}
147
148static LLVM_ATTRIBUTE_NOINLINE Value *
149emitRVVPseudoVNotBuiltin(CodeGenFunction *CGF, const CallExpr *E,
150 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
151 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
152 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
153 auto &Builder = CGF->Builder;
154 auto &CGM = CGF->CGM;
155 llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
156 if (IsMasked) {
157 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 1);
158 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
159 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
160 } else {
161 if (PolicyAttrs & RVV_VTA)
162 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
163 }
164 auto ElemTy = cast<llvm::VectorType>(Val: ResultType)->getElementType();
165 Ops.insert(I: Ops.begin() + 2, Elt: llvm::Constant::getAllOnesValue(Ty: ElemTy));
166 if (IsMasked) {
167 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
168 // maskedoff, op1, po2, mask, vl, policy
169 IntrinsicTypes = {ResultType, ElemTy, Ops[4]->getType()};
170 } else {
171 // passthru, op1, op2, vl
172 IntrinsicTypes = {ResultType, ElemTy, Ops[3]->getType()};
173 }
174 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
175 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
176}
177
178static LLVM_ATTRIBUTE_NOINLINE Value *
179emitRVVPseudoMaskBuiltin(CodeGenFunction *CGF, const CallExpr *E,
180 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
181 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
182 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
183 auto &Builder = CGF->Builder;
184 auto &CGM = CGF->CGM;
185 llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
186 // op1, vl
187 IntrinsicTypes = {ResultType, Ops[1]->getType()};
188 Ops.insert(I: Ops.begin() + 1, Elt: Ops[0]);
189 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
190 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
191}
192
193static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVPseudoVFUnaryBuiltin(
194 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
195 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
196 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
197 auto &Builder = CGF->Builder;
198 auto &CGM = CGF->CGM;
199 llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
200 if (IsMasked) {
201 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 1);
202 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
203 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
204 Ops.insert(I: Ops.begin() + 2, Elt: Ops[1]);
205 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
206 // maskedoff, op1, op2, mask, vl
207 IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
208 } else {
209 if (PolicyAttrs & RVV_VTA)
210 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
211 // op1, po2, vl
212 IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()};
213 Ops.insert(I: Ops.begin() + 2, Elt: Ops[1]);
214 }
215 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
216 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
217}
218
219static LLVM_ATTRIBUTE_NOINLINE Value *
220emitRVVPseudoVWCVTBuiltin(CodeGenFunction *CGF, const CallExpr *E,
221 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
222 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
223 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
224 auto &Builder = CGF->Builder;
225 auto &CGM = CGF->CGM;
226 llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
227 if (IsMasked) {
228 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 1);
229 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
230 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
231 } else {
232 if (PolicyAttrs & RVV_VTA)
233 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
234 }
235 auto ElemTy = cast<llvm::VectorType>(Val: Ops[1]->getType())->getElementType();
236 Ops.insert(I: Ops.begin() + 2, Elt: llvm::Constant::getNullValue(Ty: ElemTy));
237 if (IsMasked) {
238 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
239 // maskedoff, op1, op2, mask, vl, policy
240 IntrinsicTypes = {ResultType, Ops[1]->getType(), ElemTy, Ops[4]->getType()};
241 } else {
242 // passtru, op1, op2, vl
243 IntrinsicTypes = {ResultType, Ops[1]->getType(), ElemTy, Ops[3]->getType()};
244 }
245 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
246 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
247}
248
249static LLVM_ATTRIBUTE_NOINLINE Value *
250emitRVVPseudoVNCVTBuiltin(CodeGenFunction *CGF, const CallExpr *E,
251 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
252 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
253 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
254 auto &Builder = CGF->Builder;
255 auto &CGM = CGF->CGM;
256 llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
257 if (IsMasked) {
258 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 1);
259 if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
260 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
261 } else {
262 if (PolicyAttrs & RVV_VTA)
263 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
264 }
265 Ops.insert(I: Ops.begin() + 2,
266 Elt: llvm::Constant::getNullValue(Ty: Ops.back()->getType()));
267 if (IsMasked) {
268 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
269 // maskedoff, op1, xlen, mask, vl
270 IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[4]->getType(),
271 Ops[4]->getType()};
272 } else {
273 // passthru, op1, xlen, vl
274 IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType(),
275 Ops[3]->getType()};
276 }
277 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
278 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
279}
280
281static LLVM_ATTRIBUTE_NOINLINE Value *
282emitRVVVlenbBuiltin(CodeGenFunction *CGF, const CallExpr *E,
283 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
284 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
285 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
286 auto &Builder = CGF->Builder;
287 auto &CGM = CGF->CGM;
288 LLVMContext &Context = CGM.getLLVMContext();
289 llvm::MDBuilder MDHelper(Context);
290 llvm::Metadata *OpsMD[] = {llvm::MDString::get(Context, Str: "vlenb")};
291 llvm::MDNode *RegName = llvm::MDNode::get(Context, MDs: OpsMD);
292 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, MD: RegName);
293 llvm::Function *F =
294 CGM.getIntrinsic(IID: llvm::Intrinsic::read_register, Tys: {CGF->SizeTy});
295 return Builder.CreateCall(Callee: F, Args: Metadata);
296}
297
298static LLVM_ATTRIBUTE_NOINLINE Value *
299emitRVVVsetvliBuiltin(CodeGenFunction *CGF, const CallExpr *E,
300 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
301 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
302 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
303 auto &Builder = CGF->Builder;
304 auto &CGM = CGF->CGM;
305 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: {ResultType});
306 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
307}
308
309static LLVM_ATTRIBUTE_NOINLINE Value *
310emitRVVVSEMaskBuiltin(CodeGenFunction *CGF, const CallExpr *E,
311 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
312 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
313 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
314 auto &Builder = CGF->Builder;
315 auto &CGM = CGF->CGM;
316 llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
317 if (IsMasked) {
318 // Builtin: (mask, ptr, value, vl).
319 // Intrinsic: (value, ptr, mask, vl)
320 std::swap(a&: Ops[0], b&: Ops[2]);
321 } else {
322 // Builtin: (ptr, value, vl).
323 // Intrinsic: (value, ptr, vl)
324 std::swap(a&: Ops[0], b&: Ops[1]);
325 }
326 if (IsMasked)
327 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[3]->getType()};
328 else
329 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType()};
330 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
331 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
332}
333
334static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVUnitStridedSegLoadTupleBuiltin(
335 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
336 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
337 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
338 auto &Builder = CGF->Builder;
339 auto &CGM = CGF->CGM;
340 llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
341 bool NoPassthru =
342 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
343 (!IsMasked && (PolicyAttrs & RVV_VTA));
344 unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
345 if (IsMasked)
346 IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[0]->getType(),
347 Ops.back()->getType()};
348 else
349 IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
350 Ops.back()->getType()};
351 if (IsMasked)
352 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 1);
353 if (NoPassthru)
354 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
355 if (IsMasked)
356 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
357 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: SegInstSEW));
358 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
359 llvm::Value *LoadValue = Builder.CreateCall(Callee: F, Args: Ops, Name: "");
360 if (ReturnValue.isNull())
361 return LoadValue;
362 return Builder.CreateStore(Val: LoadValue, Addr: ReturnValue.getValue());
363}
364
365static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVUnitStridedSegStoreTupleBuiltin(
366 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
367 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
368 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
369 auto &Builder = CGF->Builder;
370 auto &CGM = CGF->CGM;
371 llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
372 // Masked
373 // Builtin: (mask, ptr, v_tuple, vl)
374 // Intrinsic: (tuple, ptr, mask, vl, SegInstSEW)
375 // Unmasked
376 // Builtin: (ptr, v_tuple, vl)
377 // Intrinsic: (tuple, ptr, vl, SegInstSEW)
378 if (IsMasked)
379 std::swap(a&: Ops[0], b&: Ops[2]);
380 else
381 std::swap(a&: Ops[0], b&: Ops[1]);
382 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: SegInstSEW));
383 if (IsMasked)
384 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
385 Ops[3]->getType()};
386 else
387 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType()};
388 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
389 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
390}
391
392static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVUnitStridedSegLoadFFTupleBuiltin(
393 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
394 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
395 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
396 auto &Builder = CGF->Builder;
397 auto &CGM = CGF->CGM;
398 llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
399 bool NoPassthru =
400 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
401 (!IsMasked && (PolicyAttrs & RVV_VTA));
402 unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
403 if (IsMasked)
404 IntrinsicTypes = {ResultType, Ops.back()->getType(), Ops[Offset]->getType(),
405 Ops[0]->getType()};
406 else
407 IntrinsicTypes = {ResultType, Ops.back()->getType(),
408 Ops[Offset]->getType()};
409 if (IsMasked)
410 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 1);
411 if (NoPassthru)
412 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
413 if (IsMasked)
414 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
415 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: SegInstSEW));
416 Value *NewVL = Ops[2];
417 Ops.erase(CI: Ops.begin() + 2);
418 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
419 llvm::Value *LoadValue = Builder.CreateCall(Callee: F, Args: Ops, Name: "");
420 // Get alignment from the new vl operand
421 clang::CharUnits Align =
422 CGM.getNaturalPointeeTypeAlignment(T: E->getArg(Arg: Offset + 1)->getType());
423 llvm::Value *ReturnTuple = Builder.CreateExtractValue(Agg: LoadValue, Idxs: 0);
424 // Store new_vl
425 llvm::Value *V = Builder.CreateExtractValue(Agg: LoadValue, Idxs: 1);
426 Builder.CreateStore(Val: V, Addr: Address(NewVL, V->getType(), Align));
427 if (ReturnValue.isNull())
428 return ReturnTuple;
429 return Builder.CreateStore(Val: ReturnTuple, Addr: ReturnValue.getValue());
430}
431
432static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVStridedSegLoadTupleBuiltin(
433 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
434 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
435 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
436 auto &Builder = CGF->Builder;
437 auto &CGM = CGF->CGM;
438 llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
439 bool NoPassthru =
440 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
441 (!IsMasked && (PolicyAttrs & RVV_VTA));
442 unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
443 if (IsMasked)
444 IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops.back()->getType(),
445 Ops[0]->getType()};
446 else
447 IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
448 Ops.back()->getType()};
449 if (IsMasked)
450 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 1);
451 if (NoPassthru)
452 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
453 if (IsMasked)
454 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
455 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: SegInstSEW));
456 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
457 llvm::Value *LoadValue = Builder.CreateCall(Callee: F, Args: Ops, Name: "");
458 if (ReturnValue.isNull())
459 return LoadValue;
460 return Builder.CreateStore(Val: LoadValue, Addr: ReturnValue.getValue());
461}
462
463static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVStridedSegStoreTupleBuiltin(
464 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
465 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
466 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
467 auto &Builder = CGF->Builder;
468 auto &CGM = CGF->CGM;
469 llvm::SmallVector<llvm::Type *, 4> IntrinsicTypes;
470 // Masked
471 // Builtin: (mask, ptr, stride, v_tuple, vl)
472 // Intrinsic: (tuple, ptr, stride, mask, vl, SegInstSEW)
473 // Unmasked
474 // Builtin: (ptr, stride, v_tuple, vl)
475 // Intrinsic: (tuple, ptr, stride, vl, SegInstSEW)
476 if (IsMasked)
477 std::swap(a&: Ops[0], b&: Ops[3]);
478 else
479 std::rotate(first: Ops.begin(), middle: Ops.begin() + 2, last: Ops.begin() + 3);
480 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: SegInstSEW));
481 if (IsMasked)
482 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[4]->getType(),
483 Ops[3]->getType()};
484 else
485 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[3]->getType()};
486 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
487 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
488}
489
490static LLVM_ATTRIBUTE_NOINLINE Value *
491emitRVVAveragingBuiltin(CodeGenFunction *CGF, const CallExpr *E,
492 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
493 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
494 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
495 auto &Builder = CGF->Builder;
496 auto &CGM = CGF->CGM;
497 llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
498 // LLVM intrinsic
499 // Unmasked: (passthru, op0, op1, round_mode, vl)
500 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl,
501 // policy)
502
503 bool HasMaskedOff =
504 !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
505 (!IsMasked && PolicyAttrs & RVV_VTA));
506
507 if (IsMasked)
508 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 2);
509
510 if (!HasMaskedOff)
511 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
512
513 if (IsMasked)
514 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
515
516 llvm::Function *F = CGM.getIntrinsic(
517 IID: ID, Tys: {ResultType, Ops[2]->getType(), Ops.back()->getType()});
518 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
519}
520
521static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVNarrowingClipBuiltin(
522 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
523 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
524 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
525 auto &Builder = CGF->Builder;
526 auto &CGM = CGF->CGM;
527 llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
528 // LLVM intrinsic
529 // Unmasked: (passthru, op0, op1, round_mode, vl)
530 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl,
531 // policy)
532
533 bool HasMaskedOff =
534 !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
535 (!IsMasked && PolicyAttrs & RVV_VTA));
536
537 if (IsMasked)
538 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 2);
539
540 if (!HasMaskedOff)
541 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
542
543 if (IsMasked)
544 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
545
546 llvm::Function *F =
547 CGM.getIntrinsic(IID: ID, Tys: {ResultType, Ops[1]->getType(), Ops[2]->getType(),
548 Ops.back()->getType()});
549 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
550}
551
552static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingPointBuiltin(
553 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
554 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
555 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
556 auto &Builder = CGF->Builder;
557 auto &CGM = CGF->CGM;
558 llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
559 // LLVM intrinsic
560 // Unmasked: (passthru, op0, op1, round_mode, vl)
561 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
562
563 bool HasMaskedOff =
564 !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
565 (!IsMasked && PolicyAttrs & RVV_VTA));
566 bool HasRoundModeOp =
567 IsMasked ? (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5)
568 : (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
569
570 if (!HasRoundModeOp)
571 Ops.insert(I: Ops.end() - 1,
572 Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: 7)); // frm
573
574 if (IsMasked)
575 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 2);
576
577 if (!HasMaskedOff)
578 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
579
580 if (IsMasked)
581 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
582
583 llvm::Function *F = CGM.getIntrinsic(
584 IID: ID, Tys: {ResultType, Ops[2]->getType(), Ops.back()->getType()});
585 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
586}
587
588static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVWideningFloatingPointBuiltin(
589 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
590 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
591 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
592 auto &Builder = CGF->Builder;
593 auto &CGM = CGF->CGM;
594 llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
595 // LLVM intrinsic
596 // Unmasked: (passthru, op0, op1, round_mode, vl)
597 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
598
599 bool HasMaskedOff =
600 !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
601 (!IsMasked && PolicyAttrs & RVV_VTA));
602 bool HasRoundModeOp =
603 IsMasked ? (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5)
604 : (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
605
606 if (!HasRoundModeOp)
607 Ops.insert(I: Ops.end() - 1,
608 Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: 7)); // frm
609
610 if (IsMasked)
611 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 2);
612
613 if (!HasMaskedOff)
614 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
615
616 if (IsMasked)
617 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
618
619 llvm::Function *F =
620 CGM.getIntrinsic(IID: ID, Tys: {ResultType, Ops[1]->getType(), Ops[2]->getType(),
621 Ops.back()->getType()});
622 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
623}
624
625static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVIndexedSegLoadTupleBuiltin(
626 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
627 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
628 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
629 auto &Builder = CGF->Builder;
630 auto &CGM = CGF->CGM;
631 llvm::SmallVector<llvm::Type *, 5> IntrinsicTypes;
632
633 bool NoPassthru =
634 (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
635 (!IsMasked && (PolicyAttrs & RVV_VTA));
636
637 if (IsMasked)
638 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 1);
639 if (NoPassthru)
640 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
641
642 if (IsMasked)
643 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
644 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: SegInstSEW));
645
646 if (IsMasked)
647 IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType(),
648 Ops[3]->getType(), Ops[4]->getType()};
649 else
650 IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType(),
651 Ops[3]->getType()};
652 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
653 llvm::Value *LoadValue = Builder.CreateCall(Callee: F, Args: Ops, Name: "");
654
655 if (ReturnValue.isNull())
656 return LoadValue;
657 return Builder.CreateStore(Val: LoadValue, Addr: ReturnValue.getValue());
658}
659
660static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVIndexedSegStoreTupleBuiltin(
661 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
662 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
663 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
664 auto &Builder = CGF->Builder;
665 auto &CGM = CGF->CGM;
666 llvm::SmallVector<llvm::Type *, 5> IntrinsicTypes;
667 // Masked
668 // Builtin: (mask, ptr, index, v_tuple, vl)
669 // Intrinsic: (tuple, ptr, index, mask, vl, SegInstSEW)
670 // Unmasked
671 // Builtin: (ptr, index, v_tuple, vl)
672 // Intrinsic: (tuple, ptr, index, vl, SegInstSEW)
673
674 if (IsMasked)
675 std::swap(a&: Ops[0], b&: Ops[3]);
676 else
677 std::rotate(first: Ops.begin(), middle: Ops.begin() + 2, last: Ops.begin() + 3);
678
679 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: SegInstSEW));
680
681 if (IsMasked)
682 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
683 Ops[3]->getType(), Ops[4]->getType()};
684 else
685 IntrinsicTypes = {Ops[0]->getType(), Ops[1]->getType(), Ops[2]->getType(),
686 Ops[3]->getType()};
687 llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
688 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
689}
690
691static LLVM_ATTRIBUTE_NOINLINE Value *
692emitRVVFMABuiltin(CodeGenFunction *CGF, const CallExpr *E,
693 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
694 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
695 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
696 auto &Builder = CGF->Builder;
697 auto &CGM = CGF->CGM;
698 llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
699 // LLVM intrinsic
700 // Unmasked: (vector_in, vector_in/scalar_in, vector_in, round_mode,
701 // vl, policy)
702 // Masked: (vector_in, vector_in/scalar_in, vector_in, mask, frm,
703 // vl, policy)
704
705 bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
706
707 if (!HasRoundModeOp)
708 Ops.insert(I: Ops.end() - 1,
709 Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: 7)); // frm
710
711 if (IsMasked)
712 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 2);
713
714 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
715
716 llvm::Function *F = CGM.getIntrinsic(
717 IID: ID, Tys: {ResultType, Ops[1]->getType(), Ops.back()->getType()});
718 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
719}
720
721static LLVM_ATTRIBUTE_NOINLINE Value *
722emitRVVWideningFMABuiltin(CodeGenFunction *CGF, const CallExpr *E,
723 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
724 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
725 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
726 auto &Builder = CGF->Builder;
727 auto &CGM = CGF->CGM;
728 llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
729 // LLVM intrinsic
730 // Unmasked: (vector_in, vector_in/scalar_in, vector_in, round_mode, vl,
731 // policy) Masked: (vector_in, vector_in/scalar_in, vector_in, mask, frm,
732 // vl, policy)
733
734 bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
735
736 if (!HasRoundModeOp)
737 Ops.insert(I: Ops.end() - 1,
738 Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: 7)); // frm
739
740 if (IsMasked)
741 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.begin() + 4);
742
743 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
744
745 llvm::Function *F =
746 CGM.getIntrinsic(IID: ID, Tys: {ResultType, Ops[1]->getType(), Ops[2]->getType(),
747 Ops.back()->getType()});
748 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
749}
750
// Lower an RVV floating-point unary builtin (e.g. vfsqrt) to its LLVM
// intrinsic, synthesizing the passthru, rounding-mode, and policy operands
// as needed.
static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingUnaryBuiltin(
    CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
    llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
    int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
  auto &Builder = CGF->Builder;
  auto &CGM = CGF->CGM;
  llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
  // LLVM intrinsic
  // Unmasked: (passthru, op0, round_mode, vl)
  // Masked: (passthru, op0, mask, frm, vl, policy)

  // A passthru (maskedoff) operand is only supplied by the builtin when the
  // policy is not fully agnostic: TA elides it for unmasked ops, TA+MA for
  // masked ops.
  bool HasMaskedOff =
      !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
        (!IsMasked && PolicyAttrs & RVV_VTA));
  // The rounding-mode operand is optional at the source level; infer its
  // presence from the operand count, which also depends on the passthru.
  bool HasRoundModeOp =
      IsMasked ? (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4)
               : (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);

  // No explicit rounding mode: insert 7 (DYN, follow the frm CSR) just
  // before vl.
  if (!HasRoundModeOp)
    Ops.insert(I: Ops.end() - 1,
               Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: 7)); // frm

  // Builtins pass the mask first; the intrinsic wants it just before frm/vl.
  if (IsMasked)
    std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 2);

  // Synthesize a poison passthru when the builtin did not supply one.
  if (!HasMaskedOff)
    Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));

  // Masked intrinsics take a trailing tail/mask policy operand.
  if (IsMasked)
    Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));

  IntrinsicTypes = {ResultType, Ops.back()->getType()};
  llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
  return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
}
786
787static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingConvBuiltin(
788 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
789 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
790 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
791 auto &Builder = CGF->Builder;
792 auto &CGM = CGF->CGM;
793 llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
794 // LLVM intrinsic
795 // Unmasked: (passthru, op0, frm, vl)
796 // Masked: (passthru, op0, mask, frm, vl, policy)
797 bool HasMaskedOff =
798 !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
799 (!IsMasked && PolicyAttrs & RVV_VTA));
800 bool HasRoundModeOp =
801 IsMasked ? (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4)
802 : (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
803
804 if (!HasRoundModeOp)
805 Ops.insert(I: Ops.end() - 1,
806 Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: 7)); // frm
807
808 if (IsMasked)
809 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 2);
810
811 if (!HasMaskedOff)
812 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
813
814 if (IsMasked)
815 Ops.push_back(Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: PolicyAttrs));
816
817 llvm::Function *F = CGM.getIntrinsic(
818 IID: ID, Tys: {ResultType, Ops[1]->getType(), Ops.back()->getType()});
819 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
820}
821
822static LLVM_ATTRIBUTE_NOINLINE Value *emitRVVFloatingReductionBuiltin(
823 CodeGenFunction *CGF, const CallExpr *E, ReturnValueSlot ReturnValue,
824 llvm::Type *ResultType, Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
825 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
826 auto &Builder = CGF->Builder;
827 auto &CGM = CGF->CGM;
828 llvm::SmallVector<llvm::Type *, 3> IntrinsicTypes;
829 // LLVM intrinsic
830 // Unmasked: (passthru, op0, op1, round_mode, vl)
831 // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
832
833 bool HasMaskedOff =
834 !((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
835 (!IsMasked && PolicyAttrs & RVV_VTA));
836 bool HasRoundModeOp =
837 IsMasked ? (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5)
838 : (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
839
840 if (!HasRoundModeOp)
841 Ops.insert(I: Ops.end() - 1,
842 Elt: ConstantInt::get(Ty: Ops.back()->getType(), V: 7)); // frm
843
844 if (IsMasked)
845 std::rotate(first: Ops.begin(), middle: Ops.begin() + 1, last: Ops.end() - 2);
846
847 if (!HasMaskedOff)
848 Ops.insert(I: Ops.begin(), Elt: llvm::PoisonValue::get(T: ResultType));
849
850 llvm::Function *F = CGM.getIntrinsic(
851 IID: ID, Tys: {ResultType, Ops[1]->getType(), Ops.back()->getType()});
852 return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
853}
854
// Lower vreinterpret: a bit-preserving cast between RVV vector types.
// Non-boolean <-> non-boolean casts are a plain bitcast; casts involving
// vector booleans go through an m1-sized <vscale x 64 x i1> intermediate so
// the element counts line up.
static LLVM_ATTRIBUTE_NOINLINE Value *
emitRVVReinterpretBuiltin(CodeGenFunction *CGF, const CallExpr *E,
                          ReturnValueSlot ReturnValue, llvm::Type *ResultType,
                          Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
                          int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
  auto &Builder = CGF->Builder;
  auto &CGM = CGF->CGM;

  // One side is a vector of i1: route through the fixed-width boolean type.
  if (ResultType->isIntOrIntVectorTy(BitWidth: 1) ||
      Ops[0]->getType()->isIntOrIntVectorTy(BitWidth: 1)) {
    assert(isa<ScalableVectorType>(ResultType) &&
           isa<ScalableVectorType>(Ops[0]->getType()));

    // <vscale x 64 x i1> occupies exactly one m1 vector register, so it can
    // be bitcast to/from any m1 integer vector type.
    LLVMContext &Context = CGM.getLLVMContext();
    ScalableVectorType *Boolean64Ty =
        ScalableVectorType::get(ElementType: llvm::Type::getInt1Ty(C&: Context), MinNumElts: 64);

    if (ResultType->isIntOrIntVectorTy(BitWidth: 1)) {
      // Casting from m1 vector integer -> vector boolean
      // Ex: <vscale x 8 x i8>
      //     --(bitcast)--------> <vscale x 64 x i1>
      //     --(vector_extract)-> <vscale x 8 x i1>
      llvm::Value *BitCast = Builder.CreateBitCast(V: Ops[0], DestTy: Boolean64Ty);
      return Builder.CreateExtractVector(DstType: ResultType, SrcVec: BitCast,
                                         Idx: ConstantInt::get(Ty: CGF->Int64Ty, V: 0));
    } else {
      // Casting from vector boolean -> m1 vector integer
      // Ex: <vscale x  1 x i1>
      //     --(vector_insert)-> <vscale x 64 x i1>
      //     --(bitcast)-------> <vscale x  8 x i8>
      // The widened lanes beyond the source are poison; only the low lanes
      // carry defined bits after the bitcast.
      llvm::Value *Boolean64Val = Builder.CreateInsertVector(
          DstType: Boolean64Ty, SrcVec: llvm::PoisonValue::get(T: Boolean64Ty), SubVec: Ops[0],
          Idx: ConstantInt::get(Ty: CGF->Int64Ty, V: 0));
      return Builder.CreateBitCast(V: Boolean64Val, DestTy: ResultType);
    }
  }
  // Same-size non-boolean reinterpret: a single bitcast suffices.
  return Builder.CreateBitCast(V: Ops[0], DestTy: ResultType);
}
893
894static LLVM_ATTRIBUTE_NOINLINE Value *
895emitRVVGetBuiltin(CodeGenFunction *CGF, const CallExpr *E,
896 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
897 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
898 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
899 auto &Builder = CGF->Builder;
900 auto *VecTy = cast<ScalableVectorType>(Val: ResultType);
901 if (auto *OpVecTy = dyn_cast<ScalableVectorType>(Val: Ops[0]->getType())) {
902 unsigned MaxIndex =
903 OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
904 assert(isPowerOf2_32(MaxIndex));
905 // Mask to only valid indices.
906 Ops[1] = Builder.CreateZExt(V: Ops[1], DestTy: Builder.getInt64Ty());
907 Ops[1] = Builder.CreateAnd(LHS: Ops[1], RHS: MaxIndex - 1);
908 Ops[1] =
909 Builder.CreateMul(LHS: Ops[1], RHS: ConstantInt::get(Ty: Ops[1]->getType(),
910 V: VecTy->getMinNumElements()));
911 return Builder.CreateExtractVector(DstType: ResultType, SrcVec: Ops[0], Idx: Ops[1]);
912 }
913
914 return Builder.CreateIntrinsic(
915 ID: Intrinsic::riscv_tuple_extract, Types: {ResultType, Ops[0]->getType()},
916 Args: {Ops[0], Builder.CreateTrunc(V: Ops[1], DestTy: Builder.getInt32Ty())});
917}
918
919static LLVM_ATTRIBUTE_NOINLINE Value *
920emitRVVSetBuiltin(CodeGenFunction *CGF, const CallExpr *E,
921 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
922 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
923 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
924 auto &Builder = CGF->Builder;
925 if (auto *ResVecTy = dyn_cast<ScalableVectorType>(Val: ResultType)) {
926 auto *VecTy = cast<ScalableVectorType>(Val: Ops[2]->getType());
927 unsigned MaxIndex =
928 ResVecTy->getMinNumElements() / VecTy->getMinNumElements();
929 assert(isPowerOf2_32(MaxIndex));
930 // Mask to only valid indices.
931 Ops[1] = Builder.CreateZExt(V: Ops[1], DestTy: Builder.getInt64Ty());
932 Ops[1] = Builder.CreateAnd(LHS: Ops[1], RHS: MaxIndex - 1);
933 Ops[1] =
934 Builder.CreateMul(LHS: Ops[1], RHS: ConstantInt::get(Ty: Ops[1]->getType(),
935 V: VecTy->getMinNumElements()));
936 return Builder.CreateInsertVector(DstType: ResultType, SrcVec: Ops[0], SubVec: Ops[2], Idx: Ops[1]);
937 }
938
939 return Builder.CreateIntrinsic(
940 ID: Intrinsic::riscv_tuple_insert, Types: {ResultType, Ops[2]->getType()},
941 Args: {Ops[0], Ops[2], Builder.CreateTrunc(V: Ops[1], DestTy: Builder.getInt32Ty())});
942}
943
944static LLVM_ATTRIBUTE_NOINLINE Value *
945emitRVVCreateBuiltin(CodeGenFunction *CGF, const CallExpr *E,
946 ReturnValueSlot ReturnValue, llvm::Type *ResultType,
947 Intrinsic::ID ID, SmallVectorImpl<Value *> &Ops,
948 int PolicyAttrs, bool IsMasked, unsigned SegInstSEW) {
949 auto &Builder = CGF->Builder;
950 llvm::Value *ReturnVector = llvm::PoisonValue::get(T: ResultType);
951 auto *VecTy = cast<ScalableVectorType>(Val: Ops[0]->getType());
952 for (unsigned I = 0, N = Ops.size(); I < N; ++I) {
953 if (isa<ScalableVectorType>(Val: ResultType)) {
954 llvm::Value *Idx = ConstantInt::get(Ty: Builder.getInt64Ty(),
955 V: VecTy->getMinNumElements() * I);
956 ReturnVector =
957 Builder.CreateInsertVector(DstType: ResultType, SrcVec: ReturnVector, SubVec: Ops[I], Idx);
958 } else {
959 llvm::Value *Idx = ConstantInt::get(Ty: Builder.getInt32Ty(), V: I);
960 ReturnVector = Builder.CreateIntrinsic(ID: Intrinsic::riscv_tuple_insert,
961 Types: {ResultType, Ops[I]->getType()},
962 Args: {ReturnVector, Ops[I], Idx});
963 }
964 }
965 return ReturnVector;
966}
967
968Value *CodeGenFunction::EmitRISCVCpuInit() {
969 llvm::FunctionType *FTy = llvm::FunctionType::get(Result: VoidTy, Params: {VoidPtrTy}, isVarArg: false);
970 llvm::FunctionCallee Func =
971 CGM.CreateRuntimeFunction(Ty: FTy, Name: "__init_riscv_feature_bits");
972 auto *CalleeGV = cast<llvm::GlobalValue>(Val: Func.getCallee());
973 CalleeGV->setDSOLocal(true);
974 CalleeGV->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
975 return Builder.CreateCall(Callee: Func, Args: {llvm::ConstantPointerNull::get(T: VoidPtrTy)});
976}
977
978Value *CodeGenFunction::EmitRISCVCpuSupports(const CallExpr *E) {
979
980 const Expr *FeatureExpr = E->getArg(Arg: 0)->IgnoreParenCasts();
981 StringRef FeatureStr = cast<StringLiteral>(Val: FeatureExpr)->getString();
982 if (!getContext().getTargetInfo().validateCpuSupports(Name: FeatureStr))
983 return Builder.getFalse();
984
985 return EmitRISCVCpuSupports(FeaturesStrs: ArrayRef<StringRef>(FeatureStr));
986}
987
988static Value *loadRISCVFeatureBits(unsigned Index, CGBuilderTy &Builder,
989 CodeGenModule &CGM) {
990 llvm::Type *Int32Ty = Builder.getInt32Ty();
991 llvm::Type *Int64Ty = Builder.getInt64Ty();
992 llvm::ArrayType *ArrayOfInt64Ty =
993 llvm::ArrayType::get(ElementType: Int64Ty, NumElements: llvm::RISCVISAInfo::FeatureBitSize);
994 llvm::Type *StructTy = llvm::StructType::get(elt1: Int32Ty, elts: ArrayOfInt64Ty);
995 llvm::Constant *RISCVFeaturesBits =
996 CGM.CreateRuntimeVariable(Ty: StructTy, Name: "__riscv_feature_bits");
997 cast<llvm::GlobalValue>(Val: RISCVFeaturesBits)->setDSOLocal(true);
998 Value *IndexVal = llvm::ConstantInt::get(Ty: Int32Ty, V: Index);
999 llvm::Value *GEPIndices[] = {Builder.getInt32(C: 0), Builder.getInt32(C: 1),
1000 IndexVal};
1001 Value *Ptr =
1002 Builder.CreateInBoundsGEP(Ty: StructTy, Ptr: RISCVFeaturesBits, IdxList: GEPIndices);
1003 Value *FeaturesBit =
1004 Builder.CreateAlignedLoad(Ty: Int64Ty, Addr: Ptr, Align: CharUnits::fromQuantity(Quantity: 8));
1005 return FeaturesBit;
1006}
1007
// Emit a check that every feature in FeaturesStrs is available at run time,
// by comparing required bit masks against the __riscv_feature_bits words.
// Callers must pass at least one feature that maps to a known bit.
Value *CodeGenFunction::EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs) {
  const unsigned RISCVFeatureLength = llvm::RISCVISAInfo::FeatureBitSize;
  uint64_t RequireBitMasks[RISCVFeatureLength] = {0};

  // Accumulate the required bits, grouped by feature-bit word (GroupID).
  for (auto Feat : FeaturesStrs) {
    auto [GroupID, BitPos] = RISCVISAInfo::getRISCVFeaturesBitsInfo(Ext: Feat);

    // If there is no bit position for this feature, fold the whole check to
    // false; a warning was already reported to the user during compilation.
    if (BitPos == -1)
      return Builder.getFalse();

    RequireBitMasks[GroupID] |= (1ULL << BitPos);
  }

  // For each word with required bits, test (features[Idx] & Mask) == Mask
  // and AND all the per-word results together.
  Value *Result = nullptr;
  for (unsigned Idx = 0; Idx < RISCVFeatureLength; Idx++) {
    if (RequireBitMasks[Idx] == 0)
      continue;

    Value *Mask = Builder.getInt64(C: RequireBitMasks[Idx]);
    Value *Bitset =
        Builder.CreateAnd(LHS: loadRISCVFeatureBits(Index: Idx, Builder, CGM), RHS: Mask);
    Value *CmpV = Builder.CreateICmpEQ(LHS: Bitset, RHS: Mask);
    Result = (!Result) ? CmpV : Builder.CreateAnd(LHS: Result, RHS: CmpV);
  }

  assert(Result && "Should have value here.");

  return Result;
}
1039
1040Value *CodeGenFunction::EmitRISCVCpuIs(const CallExpr *E) {
1041 const Expr *CPUExpr = E->getArg(Arg: 0)->IgnoreParenCasts();
1042 StringRef CPUStr = cast<clang::StringLiteral>(Val: CPUExpr)->getString();
1043 return EmitRISCVCpuIs(CPUStr);
1044}
1045
// Emit a check that the running CPU matches CPUStr, by comparing the three
// identification CSR values stored by the runtime in __riscv_cpu_model
// (layout: { i32 mvendorid, i64 marchid, i64 mimpid }) against the static
// model table for CPUStr.
Value *CodeGenFunction::EmitRISCVCpuIs(StringRef CPUStr) {
  llvm::Type *Int32Ty = Builder.getInt32Ty();
  llvm::Type *Int64Ty = Builder.getInt64Ty();
  llvm::StructType *StructTy = llvm::StructType::get(elt1: Int32Ty, elts: Int64Ty, elts: Int64Ty);
  llvm::Constant *RISCVCPUModel =
      CGM.CreateRuntimeVariable(Ty: StructTy, Name: "__riscv_cpu_model");
  cast<llvm::GlobalValue>(Val: RISCVCPUModel)->setDSOLocal(true);

  // Load struct field `Index` (0 = mvendorid, 1 = marchid, 2 = mimpid).
  auto loadRISCVCPUID = [&](unsigned Index) {
    Value *Ptr = Builder.CreateStructGEP(Ty: StructTy, Ptr: RISCVCPUModel, Idx: Index);
    Value *CPUID = Builder.CreateAlignedLoad(Ty: StructTy->getTypeAtIndex(N: Index),
                                             Ptr, Align: llvm::MaybeAlign());
    return CPUID;
  };

  const llvm::RISCV::CPUModel Model = llvm::RISCV::getCPUModel(CPU: CPUStr);

  // Compare mvendorid.
  Value *VendorID = loadRISCVCPUID(0);
  Value *Result =
      Builder.CreateICmpEQ(LHS: VendorID, RHS: Builder.getInt32(C: Model.MVendorID));

  // Compare marchid.
  Value *ArchID = loadRISCVCPUID(1);
  Result = Builder.CreateAnd(
      LHS: Result, RHS: Builder.CreateICmpEQ(LHS: ArchID, RHS: Builder.getInt64(C: Model.MArchID)));

  // Compare mimpid.
  Value *ImpID = loadRISCVCPUID(2);
  Result = Builder.CreateAnd(
      LHS: Result, RHS: Builder.CreateICmpEQ(LHS: ImpID, RHS: Builder.getInt64(C: Model.MImpID)));

  return Result;
}
1080
// Main dispatcher for all RISC-V builtins: routes CPU-model builtins to
// their dedicated emitters, evaluates the call arguments, then selects and
// emits the matching LLVM intrinsic via the switch below (scalar cases are
// written out here; vector cases are generated into the included .inc files).
Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
                                             const CallExpr *E,
                                             ReturnValueSlot ReturnValue) {

  if (BuiltinID == Builtin::BI__builtin_cpu_supports)
    return EmitRISCVCpuSupports(E);
  if (BuiltinID == Builtin::BI__builtin_cpu_init)
    return EmitRISCVCpuInit();
  if (BuiltinID == Builtin::BI__builtin_cpu_is)
    return EmitRISCVCpuIs(E);

  SmallVector<Value *, 4> Ops;
  llvm::Type *ResultType = ConvertType(T: E->getType());

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(ID: BuiltinID, Error, IntegerConstantArgs: &ICEArguments);
  if (Error == ASTContext::GE_Missing_type) {
    // Vector intrinsics don't have a type string.
    assert(BuiltinID >= clang::RISCV::FirstRVVBuiltin &&
           BuiltinID <= clang::RISCV::LastRVVBuiltin);
    ICEArguments = 0;
    // vget/vset require their index argument (argument 1) to be constant.
    if (BuiltinID == RISCVVector::BI__builtin_rvv_vget_v ||
        BuiltinID == RISCVVector::BI__builtin_rvv_vset_v)
      ICEArguments = 1 << 1;
  } else {
    assert(Error == ASTContext::GE_None && "Unexpected error");
  }

  // The optional domain argument of the non-temporal builtins must be an ICE.
  if (BuiltinID == RISCV::BI__builtin_riscv_ntl_load)
    ICEArguments |= (1 << 1);
  if (BuiltinID == RISCV::BI__builtin_riscv_ntl_store)
    ICEArguments |= (1 << 2);

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    // Handle aggregate argument, namely RVV tuple types in segment load/store
    if (hasAggregateEvaluationKind(T: E->getArg(Arg: i)->getType())) {
      LValue L = EmitAggExprToLValue(E: E->getArg(Arg: i));
      llvm::Value *AggValue = Builder.CreateLoad(Addr: L.getAddress());
      Ops.push_back(Elt: AggValue);
      continue;
    }
    Ops.push_back(Elt: EmitScalarOrConstFoldImmArg(ICEArguments, Idx: i, E));
  }

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  // Tail/mask policy bits (RVV_VTA/RVV_VMA) set by the generated vector cases.
  int PolicyAttrs = 0;
  bool IsMasked = false;
  // This is used by segment load/store to determine its LLVM type.
  unsigned SegInstSEW = 8;

  // Required for overloaded intrinsics.
  llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
  switch (BuiltinID) {
  default: llvm_unreachable("unexpected builtin ID");
  // Scalar bit-manipulation builtins that map 1:1 onto an intrinsic
  // overloaded only on the result type.
  case RISCV::BI__builtin_riscv_orc_b_32:
  case RISCV::BI__builtin_riscv_orc_b_64:
  case RISCV::BI__builtin_riscv_clmul_32:
  case RISCV::BI__builtin_riscv_clmul_64:
  case RISCV::BI__builtin_riscv_clmulh_32:
  case RISCV::BI__builtin_riscv_clmulh_64:
  case RISCV::BI__builtin_riscv_clmulr_32:
  case RISCV::BI__builtin_riscv_clmulr_64:
  case RISCV::BI__builtin_riscv_xperm4_32:
  case RISCV::BI__builtin_riscv_xperm4_64:
  case RISCV::BI__builtin_riscv_xperm8_32:
  case RISCV::BI__builtin_riscv_xperm8_64:
  case RISCV::BI__builtin_riscv_brev8_32:
  case RISCV::BI__builtin_riscv_brev8_64:
  case RISCV::BI__builtin_riscv_zip_32:
  case RISCV::BI__builtin_riscv_unzip_32: {
    switch (BuiltinID) {
    default: llvm_unreachable("unexpected builtin ID");
    // Zbb
    case RISCV::BI__builtin_riscv_orc_b_32:
    case RISCV::BI__builtin_riscv_orc_b_64:
      ID = Intrinsic::riscv_orc_b;
      break;

    // Zbc
    case RISCV::BI__builtin_riscv_clmul_32:
    case RISCV::BI__builtin_riscv_clmul_64:
      ID = Intrinsic::riscv_clmul;
      break;
    case RISCV::BI__builtin_riscv_clmulh_32:
    case RISCV::BI__builtin_riscv_clmulh_64:
      ID = Intrinsic::riscv_clmulh;
      break;
    case RISCV::BI__builtin_riscv_clmulr_32:
    case RISCV::BI__builtin_riscv_clmulr_64:
      ID = Intrinsic::riscv_clmulr;
      break;

    // Zbkx
    case RISCV::BI__builtin_riscv_xperm8_32:
    case RISCV::BI__builtin_riscv_xperm8_64:
      ID = Intrinsic::riscv_xperm8;
      break;
    case RISCV::BI__builtin_riscv_xperm4_32:
    case RISCV::BI__builtin_riscv_xperm4_64:
      ID = Intrinsic::riscv_xperm4;
      break;

    // Zbkb
    case RISCV::BI__builtin_riscv_brev8_32:
    case RISCV::BI__builtin_riscv_brev8_64:
      ID = Intrinsic::riscv_brev8;
      break;
    case RISCV::BI__builtin_riscv_zip_32:
      ID = Intrinsic::riscv_zip;
      break;
    case RISCV::BI__builtin_riscv_unzip_32:
      ID = Intrinsic::riscv_unzip;
      break;
    }

    IntrinsicTypes = {ResultType};
    break;
  }

  // Zk builtins

  // Zknh
  case RISCV::BI__builtin_riscv_sha256sig0:
    ID = Intrinsic::riscv_sha256sig0;
    break;
  case RISCV::BI__builtin_riscv_sha256sig1:
    ID = Intrinsic::riscv_sha256sig1;
    break;
  case RISCV::BI__builtin_riscv_sha256sum0:
    ID = Intrinsic::riscv_sha256sum0;
    break;
  case RISCV::BI__builtin_riscv_sha256sum1:
    ID = Intrinsic::riscv_sha256sum1;
    break;

  // Zksed
  case RISCV::BI__builtin_riscv_sm4ks:
    ID = Intrinsic::riscv_sm4ks;
    break;
  case RISCV::BI__builtin_riscv_sm4ed:
    ID = Intrinsic::riscv_sm4ed;
    break;

  // Zksh
  case RISCV::BI__builtin_riscv_sm3p0:
    ID = Intrinsic::riscv_sm3p0;
    break;
  case RISCV::BI__builtin_riscv_sm3p1:
    ID = Intrinsic::riscv_sm3p1;
    break;

  // clz/ctz lower to the generic count-leading/trailing-zeros intrinsics
  // (with "zero is defined" behavior), widening the result if needed.
  case RISCV::BI__builtin_riscv_clz_32:
  case RISCV::BI__builtin_riscv_clz_64: {
    Function *F = CGM.getIntrinsic(IID: Intrinsic::ctlz, Tys: Ops[0]->getType());
    Value *Result = Builder.CreateCall(Callee: F, Args: {Ops[0], Builder.getInt1(V: false)});
    if (Result->getType() != ResultType)
      Result =
          Builder.CreateIntCast(V: Result, DestTy: ResultType, /*isSigned*/ false, Name: "cast");
    return Result;
  }
  case RISCV::BI__builtin_riscv_ctz_32:
  case RISCV::BI__builtin_riscv_ctz_64: {
    Function *F = CGM.getIntrinsic(IID: Intrinsic::cttz, Tys: Ops[0]->getType());
    Value *Result = Builder.CreateCall(Callee: F, Args: {Ops[0], Builder.getInt1(V: false)});
    if (Result->getType() != ResultType)
      Result =
          Builder.CreateIntCast(V: Result, DestTy: ResultType, /*isSigned*/ false, Name: "cast");
    return Result;
  }

  // Zihintntl
  case RISCV::BI__builtin_riscv_ntl_load: {
    llvm::Type *ResTy = ConvertType(T: E->getType());
    unsigned DomainVal = 5; // Default __RISCV_NTLH_ALL
    if (Ops.size() == 2)
      DomainVal = cast<ConstantInt>(Val: Ops[1])->getZExtValue();

    // Attach the domain and nontemporal hints as instruction metadata.
    llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
        Context&: getLLVMContext(),
        MDs: llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: DomainVal)));
    llvm::MDNode *NontemporalNode = llvm::MDNode::get(
        Context&: getLLVMContext(), MDs: llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: 1)));

    // Compute the load width in bits; scalable vectors use the known
    // minimum element count.
    int Width;
    if(ResTy->isScalableTy()) {
      const ScalableVectorType *SVTy = cast<ScalableVectorType>(Val: ResTy);
      llvm::Type *ScalarTy = ResTy->getScalarType();
      Width = ScalarTy->getPrimitiveSizeInBits() *
              SVTy->getElementCount().getKnownMinValue();
    } else
      Width = ResTy->getPrimitiveSizeInBits();
    LoadInst *Load = Builder.CreateLoad(
        Addr: Address(Ops[0], ResTy, CharUnits::fromQuantity(Quantity: Width / 8)));

    Load->setMetadata(KindID: llvm::LLVMContext::MD_nontemporal, Node: NontemporalNode);
    Load->setMetadata(KindID: CGM.getModule().getMDKindID(Name: "riscv-nontemporal-domain"),
                      Node: RISCVDomainNode);

    return Load;
  }
  case RISCV::BI__builtin_riscv_ntl_store: {
    unsigned DomainVal = 5; // Default __RISCV_NTLH_ALL
    if (Ops.size() == 3)
      DomainVal = cast<ConstantInt>(Val: Ops[2])->getZExtValue();

    // Attach the domain and nontemporal hints as instruction metadata.
    llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
        Context&: getLLVMContext(),
        MDs: llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: DomainVal)));
    llvm::MDNode *NontemporalNode = llvm::MDNode::get(
        Context&: getLLVMContext(), MDs: llvm::ConstantAsMetadata::get(C: Builder.getInt32(C: 1)));

    StoreInst *Store = Builder.CreateDefaultAlignedStore(Val: Ops[1], Addr: Ops[0]);
    Store->setMetadata(KindID: llvm::LLVMContext::MD_nontemporal, Node: NontemporalNode);
    Store->setMetadata(KindID: CGM.getModule().getMDKindID(Name: "riscv-nontemporal-domain"),
                       Node: RISCVDomainNode);

    return Store;
  }
  // Zihintpause
  case RISCV::BI__builtin_riscv_pause: {
    llvm::Function *Fn = CGM.getIntrinsic(IID: llvm::Intrinsic::riscv_pause);
    return Builder.CreateCall(Callee: Fn, Args: {});
  }

  // XCValu
  case RISCV::BI__builtin_riscv_cv_alu_addN:
    ID = Intrinsic::riscv_cv_alu_addN;
    break;
  case RISCV::BI__builtin_riscv_cv_alu_addRN:
    ID = Intrinsic::riscv_cv_alu_addRN;
    break;
  case RISCV::BI__builtin_riscv_cv_alu_adduN:
    ID = Intrinsic::riscv_cv_alu_adduN;
    break;
  case RISCV::BI__builtin_riscv_cv_alu_adduRN:
    ID = Intrinsic::riscv_cv_alu_adduRN;
    break;
  case RISCV::BI__builtin_riscv_cv_alu_clip:
    ID = Intrinsic::riscv_cv_alu_clip;
    break;
  case RISCV::BI__builtin_riscv_cv_alu_clipu:
    ID = Intrinsic::riscv_cv_alu_clipu;
    break;
  // The XCValu extend/compare builtins are expressed directly with generic
  // IR (trunc/ext and icmp) instead of target intrinsics.
  case RISCV::BI__builtin_riscv_cv_alu_extbs:
    return Builder.CreateSExt(V: Builder.CreateTrunc(V: Ops[0], DestTy: Int8Ty), DestTy: Int32Ty,
                              Name: "extbs");
  case RISCV::BI__builtin_riscv_cv_alu_extbz:
    return Builder.CreateZExt(V: Builder.CreateTrunc(V: Ops[0], DestTy: Int8Ty), DestTy: Int32Ty,
                              Name: "extbz");
  case RISCV::BI__builtin_riscv_cv_alu_exths:
    return Builder.CreateSExt(V: Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty), DestTy: Int32Ty,
                              Name: "exths");
  case RISCV::BI__builtin_riscv_cv_alu_exthz:
    return Builder.CreateZExt(V: Builder.CreateTrunc(V: Ops[0], DestTy: Int16Ty), DestTy: Int32Ty,
                              Name: "exthz");
  case RISCV::BI__builtin_riscv_cv_alu_sle:
    return Builder.CreateZExt(V: Builder.CreateICmpSLE(LHS: Ops[0], RHS: Ops[1]), DestTy: Int32Ty,
                              Name: "sle");
  case RISCV::BI__builtin_riscv_cv_alu_sleu:
    return Builder.CreateZExt(V: Builder.CreateICmpULE(LHS: Ops[0], RHS: Ops[1]), DestTy: Int32Ty,
                              Name: "sleu");
  case RISCV::BI__builtin_riscv_cv_alu_subN:
    ID = Intrinsic::riscv_cv_alu_subN;
    break;
  case RISCV::BI__builtin_riscv_cv_alu_subRN:
    ID = Intrinsic::riscv_cv_alu_subRN;
    break;
  case RISCV::BI__builtin_riscv_cv_alu_subuN:
    ID = Intrinsic::riscv_cv_alu_subuN;
    break;
  case RISCV::BI__builtin_riscv_cv_alu_subuRN:
    ID = Intrinsic::riscv_cv_alu_subuRN;
    break;

  // Vector builtins are handled from here.
#include "clang/Basic/riscv_vector_builtin_cg.inc"

  // SiFive Vector builtins are handled from here.
#include "clang/Basic/riscv_sifive_vector_builtin_cg.inc"

  // Andes Vector builtins are handled from here.
#include "clang/Basic/riscv_andes_vector_builtin_cg.inc"
  }

  // Cases that fall through set ID (and possibly IntrinsicTypes); emit the
  // intrinsic call with the collected operands.
  assert(ID != Intrinsic::not_intrinsic);

  llvm::Function *F = CGM.getIntrinsic(IID: ID, Tys: IntrinsicTypes);
  return Builder.CreateCall(Callee: F, Args: Ops, Name: "");
}
1372

source code of clang/lib/CodeGen/TargetBuiltins/RISCV.cpp