//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering builtin function calls and types using their
// demangled names and TableGen records.
//
//===----------------------------------------------------------------------===//

#include "SPIRVBuiltins.h"
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <regex>
#include <string>
#include <tuple>

#define DEBUG_TYPE "spirv-builtins"

namespace llvm {
namespace SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"

struct DemangledBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltinGroup Group;
  uint8_t MinNumArgs;
  uint8_t MaxNumArgs;
};

#define GET_DemangledBuiltins_DECL
#define GET_DemangledBuiltins_IMPL

struct IncomingCall {
  const std::string BuiltinName;
  const DemangledBuiltin *Builtin;

  const Register ReturnRegister;
  const SPIRVType *ReturnType;
  const SmallVectorImpl<Register> &Arguments;

  IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin,
               const Register ReturnRegister, const SPIRVType *ReturnType,
               const SmallVectorImpl<Register> &Arguments)
      : BuiltinName(BuiltinName), Builtin(Builtin),
        ReturnRegister(ReturnRegister), ReturnType(ReturnType),
        Arguments(Arguments) {}

  bool isSpirvOp() const { return BuiltinName.rfind("__spirv_", 0) == 0; }
};

struct NativeBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Opcode;
};

#define GET_NativeBuiltins_DECL
#define GET_NativeBuiltins_IMPL

struct GroupBuiltin {
  StringRef Name;
  uint32_t Opcode;
  uint32_t GroupOperation;
  bool IsElect;
  bool IsAllOrAny;
  bool IsAllEqual;
  bool IsBallot;
  bool IsInverseBallot;
  bool IsBallotBitExtract;
  bool IsBallotFindBit;
  bool IsLogical;
  bool NoGroupOperation;
  bool HasBoolArg;
};

#define GET_GroupBuiltins_DECL
#define GET_GroupBuiltins_IMPL

struct IntelSubgroupsBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsBlock;
  bool IsWrite;
  bool IsMedia;
};

#define GET_IntelSubgroupsBuiltins_DECL
#define GET_IntelSubgroupsBuiltins_IMPL

struct AtomicFloatingBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_AtomicFloatingBuiltins_DECL
#define GET_AtomicFloatingBuiltins_IMPL

struct GroupUniformBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_GroupUniformBuiltins_DECL
#define GET_GroupUniformBuiltins_IMPL

struct GetBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltIn::BuiltIn Value;
};

using namespace BuiltIn;
#define GET_GetBuiltins_DECL
#define GET_GetBuiltins_IMPL

struct ImageQueryBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Component;
};

#define GET_ImageQueryBuiltins_DECL
#define GET_ImageQueryBuiltins_IMPL

struct ConvertBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  bool IsDestinationSigned;
  bool IsSaturated;
  bool IsRounded;
  bool IsBfloat16;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

struct VectorLoadStoreBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Number;
  uint32_t ElementCount;
  bool IsRounded;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

using namespace FPRoundingMode;
#define GET_ConvertBuiltins_DECL
#define GET_ConvertBuiltins_IMPL

using namespace InstructionSet;
#define GET_VectorLoadStoreBuiltins_DECL
#define GET_VectorLoadStoreBuiltins_IMPL

#define GET_CLMemoryScope_DECL
#define GET_CLSamplerAddressingMode_DECL
#define GET_CLMemoryFenceFlags_DECL
#define GET_ExtendedBuiltins_DECL
#include "SPIRVGenTables.inc"
} // namespace SPIRV

//===----------------------------------------------------------------------===//
// Misc functions for looking up builtins and verifying requirements using
// TableGen records
//===----------------------------------------------------------------------===//

namespace SPIRV {
/// Parses the name part of the demangled builtin call.
std::string lookupBuiltinNameHelper(StringRef DemangledCall,
                                    FPDecorationId *DecorationId) {
  const static std::string PassPrefix = "(anonymous namespace)::";
  std::string BuiltinName;
  // Itanium Demangler result may have "(anonymous namespace)::" prefix
  if (DemangledCall.starts_with(PassPrefix.c_str()))
    BuiltinName = DemangledCall.substr(PassPrefix.length());
  else
    BuiltinName = DemangledCall;
  // Extract the builtin function name and types of arguments from the call
  // skeleton.
  BuiltinName = BuiltinName.substr(0, BuiltinName.find('('));
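  // For example, a demangled call "clamp(float, float, float)" is reduced to
  // the plain name "clamp" at this point.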

  // Account for possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR
  if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
    BuiltinName = BuiltinName.substr(12);

  // Check if the extracted name contains type information between angle
  // brackets. If so, the builtin is an instantiated template - needs to have
  // the information after angle brackets and return type removed.
  std::size_t Pos1 = BuiltinName.rfind('<');
  if (Pos1 != std::string::npos && BuiltinName.back() == '>') {
    std::size_t Pos2 = BuiltinName.rfind(' ', Pos1);
    if (Pos2 == std::string::npos)
      Pos2 = 0;
    else
      ++Pos2;
    BuiltinName = BuiltinName.substr(Pos2, Pos1 - Pos2);
    BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
  }

  // Check if the extracted name begins with:
  // - "__spirv_ImageSampleExplicitLod"
  // - "__spirv_ImageRead"
  // - "__spirv_ImageQuerySizeLod"
  // - "__spirv_UDotKHR"
  // - "__spirv_SDotKHR"
  // - "__spirv_SUDotKHR"
  // - "__spirv_SDotAccSatKHR"
  // - "__spirv_UDotAccSatKHR"
  // - "__spirv_SUDotAccSatKHR"
  // - "__spirv_ReadClockKHR"
  // - "__spirv_SubgroupBlockReadINTEL"
  // - "__spirv_SubgroupImageBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockWriteINTEL"
  // - "__spirv_Convert"
  // - "__spirv_UConvert"
  // - "__spirv_SConvert"
  // - "__spirv_FConvert"
  // - "__spirv_SatConvert"
  // and contains return type information at the end "_R<type>".
  // If so, extract the plain builtin name without the type information.
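  // For example, "__spirv_FConvert_Rhalf_rtz" is reduced to
  // "__spirv_FConvert", and, when a DecorationId out-parameter is supplied,
  // the trailing "rtz" postfix is reported through it.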
  static const std::regex SpvWithR(
      "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageQuerySizeLod|UDotKHR|"
      "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
      "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
      "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
      "Convert|"
      "UConvert|SConvert|FConvert|SatConvert).*)_R[^_]*_?(\\w+)?.*");
  std::smatch Match;
  if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
    std::ssub_match SubMatch;
    if (DecorationId && Match.size() > 3) {
      SubMatch = Match[3];
      *DecorationId = demangledPostfixToDecorationId(SubMatch.str());
    }
    SubMatch = Match[1];
    BuiltinName = SubMatch.str();
  }

  return BuiltinName;
}
} // namespace SPIRV

/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
/// the provided \p DemangledCall and specified \p Set.
///
/// The lookup follows the following algorithm, returning the first successful
/// match:
/// 1. Search with the plain demangled name (expecting a 1:1 match).
/// 2. Search with the prefix before or suffix after the demangled name
///    signifying the type of the first argument.
///
/// \returns Wrapper around the demangled call and found builtin definition.
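/// For example, "abs(uint)" in the OpenCL_std set misses the plain lookup of
/// "abs" and is only found on the retry with the unsigned prefix, "u_abs".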
static std::unique_ptr<const SPIRV::IncomingCall>
lookupBuiltin(StringRef DemangledCall,
              SPIRV::InstructionSet::InstructionSet Set,
              Register ReturnRegister, const SPIRVType *ReturnType,
              const SmallVectorImpl<Register> &Arguments) {
  std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);

  SmallVector<StringRef, 10> BuiltinArgumentTypes;
  StringRef BuiltinArgs =
      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);

  // Look up the builtin in the defined set. Start with the plain demangled
  // name, expecting a 1:1 match in the defined builtin set.
  const SPIRV::DemangledBuiltin *Builtin;
  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
    return std::make_unique<SPIRV::IncomingCall>(
        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

  // If the initial look up was unsuccessful and the demangled call takes at
  // least 1 argument, add a prefix or suffix signifying the type of the first
  // argument and repeat the search.
  if (BuiltinArgumentTypes.size() >= 1) {
    char FirstArgumentType = BuiltinArgumentTypes[0][0];
    // Prefix to be added to the builtin's name for lookup.
    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
    std::string Prefix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "u_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "s_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      if (Set == SPIRV::InstructionSet::OpenCL_std ||
          Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "f";
      break;
    }

    // If argument-type name prefix was added, look up the builtin again.
    if (!Prefix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

    // If lookup with a prefix failed, find a suffix to be added to the
    // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
    // an unsigned value has a suffix "u".
    std::string Suffix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      Suffix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      Suffix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      Suffix = "f";
      break;
    }

    // If argument-type name suffix was added, look up the builtin again.
    if (!Suffix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
  }

  // No builtin with such name was found in the set.
  return nullptr;
}

static MachineInstr *getBlockStructInstr(Register ParamReg,
                                         MachineRegisterInfo *MRI) {
  // We expect the following sequence of instructions:
  //   %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
  //   or       = G_GLOBAL_VALUE @block_literal_global
  //   %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
  //   %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
  MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
  assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
         MI->getOperand(1).isReg());
  Register BitcastReg = MI->getOperand(1).getReg();
  MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
  assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
         BitcastMI->getOperand(2).isReg());
  Register ValueReg = BitcastMI->getOperand(2).getReg();
  MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
  return ValueMI;
}

// Return an integer constant corresponding to the given register and
// defined in spv_track_constant.
// TODO: maybe unify with prelegalizer pass.
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI) {
  MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
  assert(isSpvIntrinsic(*DefMI, Intrinsic::spv_track_constant) &&
         DefMI->getOperand(2).isReg());
  MachineInstr *DefMI2 = MRI->getUniqueVRegDef(DefMI->getOperand(2).getReg());
  assert(DefMI2->getOpcode() == TargetOpcode::G_CONSTANT &&
         DefMI2->getOperand(1).isCImm());
  return DefMI2->getOperand(1).getCImm()->getValue().getZExtValue();
}

// Return type of the instruction result from spv_assign_type intrinsic.
// TODO: maybe unify with prelegalizer pass.
static Type *getMachineInstrType(MachineInstr *MI) {
  MachineInstr *NextMI = MI->getNextNode();
  if (!NextMI)
    return nullptr;
  if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
    if ((NextMI = NextMI->getNextNode()) == nullptr)
      return nullptr;
  Register ValueReg = MI->getOperand(0).getReg();
  if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
       !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
      NextMI->getOperand(1).getReg() != ValueReg)
    return nullptr;
  Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
  assert(Ty && "Type is expected");
  return Ty;
}

static const Type *getBlockStructType(Register ParamReg,
                                      MachineRegisterInfo *MRI) {
  // In principle, this information should be passed to us from Clang via
  // an elementtype attribute. However, said attribute requires that
  // the function call be an intrinsic, which is not. Instead, we rely on being
  // able to trace this to the declaration of a variable: OpenCL C specification
  // section 6.12.5 should guarantee that we can do this.
  MachineInstr *MI = getBlockStructInstr(ParamReg, MRI);
  if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
    return MI->getOperand(1).getGlobal()->getType();
  assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
         "Blocks in OpenCL C must be traceable to allocation site");
  return getMachineInstrType(MI);
}

//===----------------------------------------------------------------------===//
// Helper functions for building misc instructions
//===----------------------------------------------------------------------===//

/// Helper function building either a resulting scalar or vector bool register
/// depending on the expected \p ResultType.
///
/// \returns Tuple of the resulting register and its type.
static std::tuple<Register, SPIRVType *>
buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
                  SPIRVGlobalRegistry *GR) {
  LLT Type;
  SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);

  if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned VectorElements = ResultType->getOperand(2).getImm();
    BoolType =
        GR->getOrCreateSPIRVVectorType(BoolType, VectorElements, MIRBuilder);
    const FixedVectorType *LLVMVectorType =
        cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
    Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
  } else {
    Type = LLT::scalar(1);
  }

  Register ResultRegister =
      MIRBuilder.getMRI()->createGenericVirtualRegister(Type);
  MIRBuilder.getMRI()->setRegClass(ResultRegister, GR->getRegClass(ResultType));
  GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
  return std::make_tuple(ResultRegister, BoolType);
}

/// Helper function for building either a vector or scalar select instruction
/// depending on the expected \p ResultType.
static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
                            Register ReturnRegister, Register SourceRegister,
                            const SPIRVType *ReturnType,
                            SPIRVGlobalRegistry *GR) {
  Register TrueConst, FalseConst;

  if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
    uint64_t AllOnes = APInt::getAllOnes(Bits).getZExtValue();
    TrueConst = GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType);
    FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType);
  } else {
    TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType);
    FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType);
  }

  return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
                                FalseConst);
}

/// Helper function for building a load instruction loading into the
/// \p DestinationReg.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR, LLT LowLevelType,
                              Register DestinationReg = Register(0)) {
  if (!DestinationReg.isValid())
    DestinationReg = createVirtualRegister(BaseType, GR, MIRBuilder);
  // TODO: consider using correct address space and alignment (p0 is canonical
  // type for selection though).
  MachinePointerInfo PtrInfo = MachinePointerInfo();
  MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
  return DestinationReg;
}

/// Helper function for building a load instruction for loading a builtin
/// global variable of \p BuiltinValue value.
static Register buildBuiltinVariableLoad(
    MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
    SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
    Register Reg = Register(0), bool isConst = true, bool hasLinkageTy = true) {
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
  MIRBuilder.getMRI()->setType(
      NewRegister,
      LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
                   GR->getPointerSize()));
  SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType(
      VariableType, MIRBuilder, SPIRV::StorageClass::Input);
  GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Set up the global OpVariable with the necessary builtin decorations.
  Register Variable = GR->buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst,
      /* HasLinkageTy */ hasLinkageTy, SPIRV::LinkageType::Import, MIRBuilder,
      false);

  // Load the value from the global variable.
  Register LoadedRegister =
      buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
  return LoadedRegister;
}

/// Helper external function for inserting ASSIGN_TYPE instruction between \p
/// Reg and its definition, set the new register as a destination of the
/// definition, assign SPIRVType to both registers. If SpirvTy is provided, use
/// it as SPIRVType in ASSIGN_TYPE, otherwise create it from \p Ty. Defined in
/// SPIRVPreLegalizer.cpp.
extern Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
                                  SPIRVGlobalRegistry *GR,
                                  MachineIRBuilder &MIB,
                                  MachineRegisterInfo &MRI);

// TODO: Move to TableGen.
static SPIRV::MemorySemantics::MemorySemantics
getSPIRVMemSemantics(std::memory_order MemOrder) {
  switch (MemOrder) {
  case std::memory_order_relaxed:
    return SPIRV::MemorySemantics::None;
  case std::memory_order_acquire:
    return SPIRV::MemorySemantics::Acquire;
  case std::memory_order_release:
    return SPIRV::MemorySemantics::Release;
  case std::memory_order_acq_rel:
    return SPIRV::MemorySemantics::AcquireRelease;
  case std::memory_order_seq_cst:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  default:
    report_fatal_error("Unknown CL memory order");
  }
}

static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
  switch (ClScope) {
  case SPIRV::CLMemoryScope::memory_scope_work_item:
    return SPIRV::Scope::Invocation;
  case SPIRV::CLMemoryScope::memory_scope_work_group:
    return SPIRV::Scope::Workgroup;
  case SPIRV::CLMemoryScope::memory_scope_device:
    return SPIRV::Scope::Device;
  case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
    return SPIRV::Scope::CrossDevice;
  case SPIRV::CLMemoryScope::memory_scope_sub_group:
    return SPIRV::Scope::Subgroup;
  }
  report_fatal_error("Unknown CL memory scope");
}

static Register buildConstantIntReg32(uint64_t Val,
                                      MachineIRBuilder &MIRBuilder,
                                      SPIRVGlobalRegistry *GR) {
  return GR->buildConstantInt(Val, MIRBuilder,
                              GR->getOrCreateSPIRVIntegerType(32, MIRBuilder));
}

static Register buildScopeReg(Register CLScopeRegister,
                              SPIRV::Scope::Scope Scope,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              MachineRegisterInfo *MRI) {
  if (CLScopeRegister.isValid()) {
    auto CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
    Scope = getSPIRVScope(CLScope);

    if (CLScope == static_cast<unsigned>(Scope)) {
      MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
      return CLScopeRegister;
    }
  }
  return buildConstantIntReg32(Scope, MIRBuilder, GR);
}

static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI,
                              SPIRVGlobalRegistry *GR) {
  if (MRI->getRegClassOrNull(Reg))
    return;
  SPIRVType *SpvType = GR->getSPIRVTypeForVReg(Reg);
  MRI->setRegClass(Reg,
                   SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
}

static Register buildMemSemanticsReg(Register SemanticsRegister,
                                     Register PtrRegister, unsigned &Semantics,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  if (SemanticsRegister.isValid()) {
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    std::memory_order Order =
        static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
    Semantics =
        getSPIRVMemSemantics(Order) |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    if (static_cast<unsigned>(Order) == Semantics) {
      MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
      return SemanticsRegister;
    }
  }
  return buildConstantIntReg32(Semantics, MIRBuilder, GR);
}

static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                               const SPIRV::IncomingCall *Call,
                               Register TypeReg,
                               ArrayRef<uint32_t> ImmArgs = {}) {
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (TypeReg.isValid())
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
  unsigned Sz = Call->Arguments.size() - ImmArgs.size();
  for (unsigned i = 0; i < Sz; ++i)
    MIB.addUse(Call->Arguments[i]);
  for (uint32_t ImmArg : ImmArgs)
    MIB.addImm(ImmArg);
  return true;
}
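
// For example, a direct "__spirv_AtomicLoad(ptr, scope, semantics)" call is
// routed through this wrapper and becomes OpAtomicLoad with the call operands
// attached verbatim.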

/// Helper function for translating atomic init to OpStore.
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));

  assert(Call->Arguments.size() == 2 &&
         "Need 2 arguments for atomic init translation");
  MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic load instruction.
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);

  Register PtrRegister = Call->Arguments[0];
  // TODO: if true insert call to __translate_ocl_memory_scope before
  // OpAtomicLoad and the function implementation. We can use Translator's
  // output for transcoding/atomic_explicit_arguments.cl as an example.
  Register ScopeRegister =
      Call->Arguments.size() > 1
          ? Call->Arguments[1]
          : buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register MemSemanticsReg;
  if (Call->Arguments.size() > 2) {
    // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
    MemSemanticsReg = Call->Arguments[2];
  } else {
    int Semantics =
        SPIRV::MemorySemantics::SequentiallyConsistent |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  }

  MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
      .addDef(Call->ReturnRegister)
      .addUse(TypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building an atomic store instruction.
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call,
                              Register(0));

  Register ScopeRegister =
      buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register PtrRegister = Call->Arguments[0];
  int Semantics =
      SPIRV::MemorySemantics::SequentiallyConsistent |
      getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
  Register MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic compare-exchange instruction.
static bool buildAtomicCompareExchangeInst(
    const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
    unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register ObjectPtr = Call->Arguments[0];   // Pointer (volatile A *object.)
  Register ExpectedArg = Call->Arguments[1]; // Comparator (C* expected).
  Register Desired = Call->Arguments[2];     // Value (C Desired).
  SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
  LLT DesiredLLT = MRI->getType(Desired);

  assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
         SPIRV::OpTypePointer);
  unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
  (void)ExpectedType;
  assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
                   : ExpectedType == SPIRV::OpTypePointer);
  assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));

  SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
  assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
      SpvObjectPtrTy->getOperand(1).getImm());
  auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);

  Register MemSemEqualReg;
  Register MemSemUnequalReg;
  uint64_t MemSemEqual =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  uint64_t MemSemUnequal =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  if (Call->Arguments.size() >= 4) {
    assert(Call->Arguments.size() >= 5 &&
           "Need 5+ args for explicit atomic cmpxchg");
    auto MemOrdEq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
    auto MemOrdNeq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
    MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
    MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
    if (static_cast<unsigned>(MemOrdEq) == MemSemEqual)
      MemSemEqualReg = Call->Arguments[3];
    if (static_cast<unsigned>(MemOrdNeq) == MemSemEqual)
      MemSemUnequalReg = Call->Arguments[4];
  }
  if (!MemSemEqualReg.isValid())
    MemSemEqualReg = buildConstantIntReg32(MemSemEqual, MIRBuilder, GR);
  if (!MemSemUnequalReg.isValid())
    MemSemUnequalReg = buildConstantIntReg32(MemSemUnequal, MIRBuilder, GR);

  Register ScopeReg;
  auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
  if (Call->Arguments.size() >= 6) {
    assert(Call->Arguments.size() == 6 &&
           "Extra args for explicit atomic cmpxchg");
    auto ClScope = static_cast<SPIRV::CLMemoryScope>(
        getIConstVal(Call->Arguments[5], MRI));
    Scope = getSPIRVScope(ClScope);
    if (ClScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[5];
  }
  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register Expected = IsCmpxchg
                          ? ExpectedArg
                          : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
                                          GR, LLT::scalar(64));
  MRI->setType(Expected, DesiredLLT);
  Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
                            : Call->ReturnRegister;
  if (!MRI->getRegClassOrNull(Tmp))
    MRI->setRegClass(Tmp, GR->getRegClass(SpvDesiredTy));
  GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());

  SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
  MIRBuilder.buildInstr(Opcode)
      .addDef(Tmp)
      .addUse(GR->getSPIRVTypeID(IntTy))
      .addUse(ObjectPtr)
      .addUse(ScopeReg)
      .addUse(MemSemEqualReg)
      .addUse(MemSemUnequalReg)
      .addUse(Desired)
      .addUse(Expected);
  if (!IsCmpxchg) {
    MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
  }
  return true;
}

/// Helper function for building atomic instructions.
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register ScopeRegister =
      Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();

  assert(Call->Arguments.size() <= 4 &&
         "Too many args for explicit atomic RMW");
  ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
                                MIRBuilder, GR, MRI);

  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::None;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);
  Register ValueReg = Call->Arguments[1];
  Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  // support cl_ext_float_atomics
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
    if (Opcode == SPIRV::OpAtomicIAdd) {
      Opcode = SPIRV::OpAtomicFAddEXT;
    } else if (Opcode == SPIRV::OpAtomicISub) {
      // Translate OpAtomicISub applied to a floating type argument to
      // OpAtomicFAddEXT with the negative value operand
      Opcode = SPIRV::OpAtomicFAddEXT;
      Register NegValueReg =
          MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
      MRI->setRegClass(NegValueReg, GR->getRegClass(Call->ReturnType));
      GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
                                MIRBuilder.getMF());
      MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
          .addDef(NegValueReg)
          .addUse(ValueReg);
      insertAssignInstr(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
                        MIRBuilder.getMF().getRegInfo());
      ValueReg = NegValueReg;
    }
  }
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(ValueTypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}
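
// For example, a floating-point "atomic_fetch_sub" cannot use the integer
// OpAtomicISub, so it is emitted as OpAtomicFAddEXT whose value operand is the
// G_FNEG of the original argument.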

/// Helper function for building an atomic floating-type instruction.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
                                       unsigned Opcode,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  assert(Call->Arguments.size() == 4 &&
         "Wrong number of arguments for atomic floating-type builtin");
  Register PtrReg = Call->Arguments[0];
  Register ScopeReg = Call->Arguments[1];
  Register MemSemanticsReg = Call->Arguments[2];
  Register ValueReg = Call->Arguments[3];
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrReg)
      .addUse(ScopeReg)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building atomic flag instructions (e.g.
/// OpAtomicFlagTestAndSet).
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
                                unsigned Opcode, MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);

  assert((Opcode != SPIRV::OpAtomicFlagClear ||
          (Semantics != SPIRV::MemorySemantics::Acquire &&
           Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
         "Invalid memory order argument!");

  Register ScopeRegister =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  ScopeRegister =
      buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);

  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsSet)
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);

  MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building barriers, i.e., memory/control ordering
/// operations.
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                             MachineIRBuilder &MIRBuilder,
                             SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
       Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_split_barrier";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
  unsigned MemSemantics = SPIRV::MemorySemantics::None;

  if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;

  if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;

  if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::ImageMemory;

  if (Opcode == SPIRV::OpMemoryBarrier)
    MemSemantics = getSPIRVMemSemantics(static_cast<std::memory_order>(
                       getIConstVal(Call->Arguments[1], MRI))) |
                   MemSemantics;
  else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Release;
  else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Acquire;
  else
    MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;

  Register MemSemanticsReg =
      MemFlags == MemSemantics
          ? Call->Arguments[0]
          : buildConstantIntReg32(MemSemantics, MIRBuilder, GR);
  Register ScopeReg;
  SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
  SPIRV::Scope::Scope MemScope = Scope;
  if (Call->Arguments.size() >= 2) {
    assert(
        ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
         (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
        "Extra args for explicitly scoped barrier");
    Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
                                                           : Call->Arguments[1];
    SPIRV::CLMemoryScope CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
    MemScope = getSPIRVScope(CLScope);
    if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
        (Opcode == SPIRV::OpMemoryBarrier))
      Scope = MemScope;
    if (CLScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[1];
  }

  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
  if (Opcode != SPIRV::OpMemoryBarrier)
    MIB.addUse(buildConstantIntReg32(MemScope, MIRBuilder, GR));
  MIB.addUse(MemSemanticsReg);
  return true;
}
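
// For example, OpenCL "barrier(CLK_LOCAL_MEM_FENCE)" is lowered to
// OpControlBarrier with Workgroup execution and memory scopes and
// WorkgroupMemory|SequentiallyConsistent memory semantics.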

static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
  switch (dim) {
  case SPIRV::Dim::DIM_1D:
  case SPIRV::Dim::DIM_Buffer:
    return 1;
  case SPIRV::Dim::DIM_2D:
  case SPIRV::Dim::DIM_Cube:
  case SPIRV::Dim::DIM_Rect:
    return 2;
  case SPIRV::Dim::DIM_3D:
    return 3;
  default:
    report_fatal_error("Cannot get num components for given Dim");
  }
}

/// Helper function for obtaining the number of size components.
static unsigned getNumSizeComponents(SPIRVType *imgType) {
  assert(imgType->getOpcode() == SPIRV::OpTypeImage);
  auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
  unsigned numComps = getNumComponentsForDim(dim);
  bool arrayed = imgType->getOperand(4).getImm() == 1;
  return arrayed ? numComps + 1 : numComps;
}
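
// For example, a 2D arrayed image reports 3 size components: width, height,
// and the number of array layers.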

//===----------------------------------------------------------------------===//
// Implementation functions for each builtin group
//===----------------------------------------------------------------------===//

static bool generateExtInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR) {
  // Lookup the extended instruction number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Number =
      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;

  // Build extended instruction.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Number);

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);
  return true;
}

static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register CompareRegister;
  SPIRVType *RelationType;
  std::tie(CompareRegister, RelationType) =
      buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  // Build relational instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(CompareRegister)
                 .addUse(GR->getSPIRVTypeID(RelationType));

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);

  // Build select instruction.
  return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
                         Call->ReturnType, GR);
}

static bool generateGroupInst(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const SPIRV::GroupBuiltin *GroupBuiltin =
      SPIRV::lookupGroupBuiltin(Builtin->Name);

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (Call->isSpirvOp()) {
    if (GroupBuiltin->NoGroupOperation)
      return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
                                GR->getSPIRVTypeID(Call->ReturnType));

    // Group Operation is a literal
    Register GroupOpReg = Call->Arguments[1];
    const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
    if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
      report_fatal_error(
          "Group Operation parameter must be an integer constant");
    uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
    Register ScopeReg = Call->Arguments[0];
    auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                   .addUse(ScopeReg)
                   .addImm(GrpOp);
    for (unsigned i = 2; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }

  Register Arg0;
  if (GroupBuiltin->HasBoolArg) {
    SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);
    Register BoolReg = Call->Arguments[0];
    SPIRVType *BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
    if (!BoolRegType)
      report_fatal_error("Can't find a register's type definition");
    MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
    if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
        Arg0 = GR->buildConstantInt(getIConstVal(BoolReg, MRI), MIRBuilder,
                                    BoolType);
    } else {
      if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
        Arg0 = MRI->createGenericVirtualRegister(LLT::scalar(1));
        MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
        GR->assignSPIRVTypeToVReg(BoolType, Arg0, MIRBuilder.getMF());
        MIRBuilder.buildICmp(CmpInst::ICMP_NE, Arg0, BoolReg,
                             GR->buildConstantInt(0, MIRBuilder, BoolRegType));
        insertAssignInstr(Arg0, nullptr, BoolType, GR, MIRBuilder,
                          MIRBuilder.getMF().getRegInfo());
      } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
        report_fatal_error("Expect a boolean argument");
      }
      // if BoolReg is a boolean register, we don't need to do anything
    }
  }

  Register GroupResultRegister = Call->ReturnRegister;
  SPIRVType *GroupResultType = Call->ReturnType;

  // TODO: maybe we need to check whether the result type is already boolean
  // and in this case do not insert select instruction.
  const bool HasBoolReturnTy =
      GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
      GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
      GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;

  if (HasBoolReturnTy)
    std::tie(GroupResultRegister, GroupResultType) =
        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
                                                      : SPIRV::Scope::Workgroup;
  Register ScopeRegister = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register VecReg;
  if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
      Call->Arguments.size() > 2) {
    // For OpGroupBroadcast "LocalId must be an integer datatype. It must be a
    // scalar, a vector with 2 components, or a vector with 3 components.",
    // meaning that we must create a vector from the function arguments if
    // it's a work_group_broadcast(val, local_id_x, local_id_y) or
    // work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call.
    Register ElemReg = Call->Arguments[1];
    SPIRVType *ElemType = GR->getSPIRVTypeForVReg(ElemReg);
    if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
      report_fatal_error("Expect an integer <LocalId> argument");
    unsigned VecLen = Call->Arguments.size() - 1;
    VecReg = MRI->createGenericVirtualRegister(
        LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
    MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
    SPIRVType *VecType =
        GR->getOrCreateSPIRVVectorType(ElemType, VecLen, MIRBuilder);
    GR->assignSPIRVTypeToVReg(VecType, VecReg, MIRBuilder.getMF());
    auto MIB =
        MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
      MIB.addUse(Call->Arguments[i]);
      setRegClassIfNull(Call->Arguments[i], MRI, GR);
    }
    insertAssignInstr(VecReg, nullptr, VecType, GR, MIRBuilder,
                      MIRBuilder.getMF().getRegInfo());
  }

  // Build work/sub group instruction.
  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                 .addDef(GroupResultRegister)
                 .addUse(GR->getSPIRVTypeID(GroupResultType))
                 .addUse(ScopeRegister);

  if (!GroupBuiltin->NoGroupOperation)
    MIB.addImm(GroupBuiltin->GroupOperation);
  if (Call->Arguments.size() > 0) {
    MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
    setRegClassIfNull(Call->Arguments[0], MRI, GR);
    if (VecReg.isValid())
      MIB.addUse(VecReg);
    else
      for (unsigned i = 1; i < Call->Arguments.size(); i++)
        MIB.addUse(Call->Arguments[i]);
  }

  // Build select instruction.
  if (HasBoolReturnTy)
    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
                    Call->ReturnType, GR);
  return true;
}
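
// For example, "work_group_broadcast(val, x, y)" packs x and y into a
// two-component vector that becomes the LocalId operand of OpGroupBroadcast
// with Workgroup scope.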

static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
      SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);

  if (IntelSubgroups->IsMedia &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_media_block_io";
    report_fatal_error(DiagMsg.c_str(), false);
  } else if (!IntelSubgroups->IsMedia &&
             !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_subgroups";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  uint32_t OpCode = IntelSubgroups->Opcode;
  if (Call->isSpirvOp()) {
    bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
    return buildOpFromWrapper(MIRBuilder, OpCode, Call,
                              IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
                                    : Register(0));
  }

  if (IntelSubgroups->IsBlock) {
    // Minimal number of arguments set in TableGen records is 1.
    if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
      if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
        // TODO: add required validation from the specification:
        // "'Image' must be an object whose type is OpTypeImage with a
        // 'Sampled' operand of 0 or 2. If the 'Sampled' operand is 2, then
        // some dimensions require a capability."
        switch (OpCode) {
        case SPIRV::OpSubgroupBlockReadINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
          break;
        case SPIRV::OpSubgroupBlockWriteINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
          break;
        }
      }
    }
  }

  // TODO: opaque pointers types should be eventually resolved in such a way
  // that validation of block read is enabled with respect to the following
  // specification requirement:
  // "'Result Type' may be a scalar or vector type, and its component type must
  // be equal to the type pointed to by 'Ptr'."
  // For example, function parameter type should not be default i8 pointer, but
  // depend on the result type of the instruction where it is used as a pointer
  // argument of OpSubgroupBlockReadINTEL

  // Build Intel subgroups instruction
  MachineInstrBuilder MIB =
      IntelSubgroups->IsWrite
          ? MIRBuilder.buildInstr(OpCode)
          : MIRBuilder.buildInstr(OpCode)
                .addDef(Call->ReturnRegister)
                .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (size_t i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);
  return true;
}

static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(
          SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_uniform_group_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  const SPIRV::GroupUniformBuiltin *GroupUniform =
      SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register GroupResultReg = Call->ReturnRegister;
  Register ScopeReg = Call->Arguments[0];
  Register ValueReg = Call->Arguments[2];

  // Group Operation
  Register ConstGroupOpReg = Call->Arguments[1];
  const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
  if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
    report_fatal_error(
        "expect a constant group operation for a uniform group instruction",
        false);
  const MachineOperand &ConstOperand = Const->getOperand(1);
  if (!ConstOperand.isCImm())
    report_fatal_error("uniform group instructions: group operation must be an "
                       "integer constant",
                       false);

  auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
                 .addDef(GroupResultReg)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                 .addUse(ScopeReg);
  addNumImm(ConstOperand.getCImm()->getValue(), MIB);
  MIB.addUse(ValueReg);

  return true;
}

static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_shader_clock";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  Register ResultReg = Call->ReturnRegister;

  // Deduce the `Scope` operand from the builtin function name.
  SPIRV::Scope::Scope ScopeArg =
      StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
          .EndsWith("device", SPIRV::Scope::Scope::Device)
          .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
          .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
  Register ScopeReg = buildConstantIntReg32(ScopeArg, MIRBuilder, GR);

  MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
      .addDef(ResultReg)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(ScopeReg);

  return true;
}
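
// For example, a clock builtin whose demangled name ends in "device" is
// lowered to OpReadClockKHR with Device scope.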

// These queries ask for a single size_t result for a given dimension index,
// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
// corresponding to these values are all vec3 types, so we need to extract the
// correct index or return defaultVal (0 or 1 depending on the query). We also
// handle extending or truncating in case size_t does not match the expected
// result type's bitwidth.
//
// For a constant index >= 3 we generate:
//   %res = OpConstant %SizeT 0
//
// For other indices we generate:
//   %g = OpVariable %ptr_V3_SizeT Input
//   OpDecorate %g BuiltIn XXX
//   OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
//   OpDecorate %g Constant
//   %loadedVec = OpLoad %V3_SizeT %g
//
// Then, if the index is constant < 3, we generate:
//   %res = OpCompositeExtract %SizeT %loadedVec idx
// If the index is dynamic, we generate:
//   %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
//   %cmp = OpULessThan %bool %idx %const_3
//   %res = OpSelect %SizeT %cmp %tmp %const_0
//
// If the bitwidth of %res does not match the expected return type, we add an
// extend or truncate.
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              SPIRV::BuiltIn::BuiltIn BuiltinValue,
                              uint64_t DefaultValue) {
  Register IndexRegister = Call->Arguments[0];
  const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
  const unsigned PointerSize = GR->getPointerSize();
  const SPIRVType *PointerSizeType =
      GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);

  // Set up the final register to do truncation or extension on at the end.
  Register ToTruncate = Call->ReturnRegister;

  // If the index is constant, we can statically determine if it is in range.
  bool IsConstantIndex =
      IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;

  // If it's out of range (max dimension is 3), we can just return the constant
  // default value (0 or 1 depending on which query function).
  if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
    Register DefaultReg = Call->ReturnRegister;
    if (PointerSize != ResultWidth) {
      DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
      MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
      GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
                                MIRBuilder.getMF());
      ToTruncate = DefaultReg;
    }
    auto NewRegister =
        GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);
    MIRBuilder.buildCopy(DefaultReg, NewRegister);
  } else { // If it could be in range, we need to load from the given builtin.
    auto Vec3Ty =
        GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder);
    Register LoadedVector =
        buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
                                 LLT::fixed_vector(3, PointerSize));
    // Set up the vreg to extract the result to (possibly a new temporary one).
    Register Extracted = Call->ReturnRegister;
    if (!IsConstantIndex || PointerSize != ResultWidth) {
      Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
      MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
      GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
    }
    // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
    // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
    MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
        Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
    ExtractInst.addUse(LoadedVector).addUse(IndexRegister);

    // If the index is dynamic, we need to check if it's < 3, and then use a
    // select.
    if (!IsConstantIndex) {
      insertAssignInstr(Extracted, nullptr, PointerSizeType, GR, MIRBuilder,
                        *MRI);

      auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
      auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);

      Register CompareRegister =
          MRI->createGenericVirtualRegister(LLT::scalar(1));
      MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
      GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());

      // Use G_ICMP to check if idxVReg < 3.
      MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
                           GR->buildConstantInt(3, MIRBuilder, IndexType));

      // Get constant for the default value (0 or 1 depending on which
      // function).
      Register DefaultRegister =
          GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);

      // Get a register for the selection result (possibly a new temporary
      // one).
      Register SelectionResult = Call->ReturnRegister;
      if (PointerSize != ResultWidth) {
        SelectionResult =
            MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
        MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
        GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
                                  MIRBuilder.getMF());
      }
      // Create the final G_SELECT to return the extracted value or the
      // default.
      MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
                             DefaultRegister);
      ToTruncate = SelectionResult;
    } else {
      ToTruncate = Extracted;
    }
  }
  // Alter the result's bitwidth if it does not match the SizeT value
  // extracted.
  if (PointerSize != ResultWidth)
    MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
  return true;
}

static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  // Lookup the builtin variable record.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  SPIRV::BuiltIn::BuiltIn Value =
      SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;

  if (Value == SPIRV::BuiltIn::GlobalInvocationId)
    return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);

  // Build a load instruction for the builtin variable.
  unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
  LLT LLType;
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
    LLType =
        LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
  else
    LLType = LLT::scalar(BitWidth);

  return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
                                  LLType, Call->ReturnRegister);
}

static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  switch (Opcode) {
  case SPIRV::OpStore:
    return buildAtomicInitInst(Call, MIRBuilder);
  case SPIRV::OpAtomicLoad:
    return buildAtomicLoadInst(Call, MIRBuilder, GR);
  case SPIRV::OpAtomicStore:
    return buildAtomicStoreInst(Call, MIRBuilder, GR);
  case SPIRV::OpAtomicCompareExchange:
  case SPIRV::OpAtomicCompareExchangeWeak:
    return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
                                          GR);
  case SPIRV::OpAtomicIAdd:
  case SPIRV::OpAtomicISub:
  case SPIRV::OpAtomicOr:
  case SPIRV::OpAtomicXor:
  case SPIRV::OpAtomicAnd:
  case SPIRV::OpAtomicExchange:
    return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
  case SPIRV::OpMemoryBarrier:
    return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
  case SPIRV::OpAtomicFlagTestAndSet:
  case SPIRV::OpAtomicFlagClear:
    return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
  default:
    if (Call->isSpirvOp())
      return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                                GR->getSPIRVTypeID(Call->ReturnType));
    return false;
  }
}

static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;

  switch (Opcode) {
  case SPIRV::OpAtomicFAddEXT:
  case SPIRV::OpAtomicFMinEXT:
  case SPIRV::OpAtomicFMaxEXT:
    return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
  default:
    return false;
  }
}
1562
1563static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
1564 MachineIRBuilder &MIRBuilder,
1565 SPIRVGlobalRegistry *GR) {
1566 // Lookup the instruction opcode in the TableGen records.
1567 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1568 unsigned Opcode =
1569 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1570
1571 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1572}
1573
1574static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call,
1575 MachineIRBuilder &MIRBuilder) {
1576 MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1577 .addDef(Call->ReturnRegister)
1578 .addUse(Call->Arguments[0]);
1579 return true;
1580}
1581
1582static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call,
1583 MachineIRBuilder &MIRBuilder,
1584 SPIRVGlobalRegistry *GR) {
1585 if (Call->isSpirvOp())
1586 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1587 GR->getSPIRVTypeID(Call->ReturnType));
1588 unsigned Opcode = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode();
1589 bool IsVec = Opcode == SPIRV::OpTypeVector;
1590 // Use OpDot only in case of vector args and OpFMul in case of scalar args.
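// Illustrative example (not taken from the TableGen records): an OpenCL call
// dot(float4 a, float4 b) lowers to OpDot, while dot(float a, float b)
// lowers to the scalar OpFMulS form built below.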
1591 MIRBuilder.buildInstr(IsVec ? SPIRV::OpDot : SPIRV::OpFMulS)
1592 .addDef(Call->ReturnRegister)
1593 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1594 .addUse(Call->Arguments[0])
1595 .addUse(Call->Arguments[1]);
1596 return true;
1597}
1598
1599static bool generateWaveInst(const SPIRV::IncomingCall *Call,
1600 MachineIRBuilder &MIRBuilder,
1601 SPIRVGlobalRegistry *GR) {
1602 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1603 SPIRV::BuiltIn::BuiltIn Value =
1604 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1605
1606 // For now, we only support a single Wave intrinsic with a single return type.
1607 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1608 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1609
1610 return buildBuiltinVariableLoad(
1611 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1612 /* isConst= */ false, /* hasLinkageTy= */ false);
1613}
1614
1615// We expect a builtin
1616// Name(ptr sret([RetType]) %result, Type %operand1, Type %operand2)
1617// where %result is a pointer to where the result of the builtin execution
1618// is to be stored, and generate the following instructions:
1619// Res = Opcode RetType Operand1 Operand2
1620// OpStore RetVariable Res
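// Illustrative example (hypothetical IR): a call
//   Name(ptr sret(%struct.anon) %res, i32 %a, i32 %b)
// produces
//   %r = OpIAddCarryS %struct.anon %a %b
//   OpStore %res %r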
1621static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call,
1622 MachineIRBuilder &MIRBuilder,
1623 SPIRVGlobalRegistry *GR) {
1624 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1625 unsigned Opcode =
1626 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1627
1628 Register SRetReg = Call->Arguments[0];
1629 SPIRVType *PtrRetType = GR->getSPIRVTypeForVReg(SRetReg);
1630 SPIRVType *RetType = GR->getPointeeType(PtrRetType);
1631 if (!RetType)
1632 report_fatal_error("The first parameter must be a pointer");
1633 if (RetType->getOpcode() != SPIRV::OpTypeStruct)
1634 report_fatal_error("Expected struct type result for the arithmetic with "
1635 "overflow builtins");
1636
1637 SPIRVType *OpType1 = GR->getSPIRVTypeForVReg(Call->Arguments[1]);
1638 SPIRVType *OpType2 = GR->getSPIRVTypeForVReg(Call->Arguments[2]);
1639 if (!OpType1 || !OpType2 || OpType1 != OpType2)
1640 report_fatal_error("Operands must have the same type");
1641 if (OpType1->getOpcode() == SPIRV::OpTypeVector)
1642 switch (Opcode) {
1643 case SPIRV::OpIAddCarryS:
1644 Opcode = SPIRV::OpIAddCarryV;
1645 break;
1646 case SPIRV::OpISubBorrowS:
1647 Opcode = SPIRV::OpISubBorrowV;
1648 break;
1649 }
1650
1651 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1652 Register ResReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1653 if (const TargetRegisterClass *DstRC =
1654 MRI->getRegClassOrNull(Call->Arguments[1])) {
1655 MRI->setRegClass(ResReg, DstRC);
1656 MRI->setType(ResReg, MRI->getType(Call->Arguments[1]));
1657 } else {
1658 MRI->setType(ResReg, LLT::scalar(64));
1659 }
1660 GR->assignSPIRVTypeToVReg(RetType, ResReg, MIRBuilder.getMF());
1661 MIRBuilder.buildInstr(Opcode)
1662 .addDef(ResReg)
1663 .addUse(GR->getSPIRVTypeID(RetType))
1664 .addUse(Call->Arguments[1])
1665 .addUse(Call->Arguments[2]);
1666 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(SRetReg).addUse(ResReg);
1667 return true;
1668}
1669
1670static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
1671 MachineIRBuilder &MIRBuilder,
1672 SPIRVGlobalRegistry *GR) {
1673 // Lookup the builtin record.
1674 SPIRV::BuiltIn::BuiltIn Value =
1675 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1676 uint64_t IsDefault = (Value == SPIRV::BuiltIn::GlobalSize ||
1677 Value == SPIRV::BuiltIn::WorkgroupSize ||
1678 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
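// Out-of-range dimension indices yield the default value: size queries
// (global/workgroup/enqueued sizes) return 1, while id and offset queries
// return 0, matching the OpenCL work-item function definitions.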
1679 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefault ? 1 : 0);
1680}
1681
1682static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
1683 MachineIRBuilder &MIRBuilder,
1684 SPIRVGlobalRegistry *GR) {
1685 // Lookup the image size query component number in the TableGen records.
1686 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1687 uint32_t Component =
1688 SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
1689 // The query result may be either a vector or a scalar. If the return type is
1690 // not a vector, expect only a single size component. Otherwise get the number
1691 // of expected components.
1692 SPIRVType *RetTy = Call->ReturnType;
1693 unsigned NumExpectedRetComponents = RetTy->getOpcode() == SPIRV::OpTypeVector
1694 ? RetTy->getOperand(2).getImm()
1695 : 1;
1696 // Get the actual number of query result/size components.
1697 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1698 unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
1699 Register QueryResult = Call->ReturnRegister;
1700 SPIRVType *QueryResultType = Call->ReturnType;
1701 if (NumExpectedRetComponents != NumActualRetComponents) {
1702 QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
1703 LLT::fixed_vector(NumActualRetComponents, 32));
1704 MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::vIDRegClass);
1705 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
1706 QueryResultType = GR->getOrCreateSPIRVVectorType(
1707 IntTy, NumActualRetComponents, MIRBuilder);
1708 GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
1709 }
1710 bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
1711 unsigned Opcode =
1712 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
1713 auto MIB = MIRBuilder.buildInstr(Opcode)
1714 .addDef(QueryResult)
1715 .addUse(GR->getSPIRVTypeID(QueryResultType))
1716 .addUse(Call->Arguments[0]);
1717 if (!IsDimBuf)
1718 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Lod id.
1719 if (NumExpectedRetComponents == NumActualRetComponents)
1720 return true;
1721 if (NumExpectedRetComponents == 1) {
1722 // Only 1 component is expected, build OpCompositeExtract instruction.
1723 unsigned ExtractedComposite =
1724 Component == 3 ? NumActualRetComponents - 1 : Component;
1725 assert(ExtractedComposite < NumActualRetComponents &&
1726 "Invalid composite index!");
1727 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
1728 SPIRVType *NewType = nullptr;
1729 if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
1730 Register NewTypeReg = QueryResultType->getOperand(1).getReg();
1731 if (TypeReg != NewTypeReg &&
1732 (NewType = GR->getSPIRVTypeForVReg(NewTypeReg)) != nullptr)
1733 TypeReg = NewTypeReg;
1734 }
1735 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1736 .addDef(Call->ReturnRegister)
1737 .addUse(TypeReg)
1738 .addUse(QueryResult)
1739 .addImm(ExtractedComposite);
1740 if (NewType != nullptr)
1741 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
1742 MIRBuilder.getMF().getRegInfo());
1743 } else {
1744 // More than 1 component is expected, fill a new vector.
1745 auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
1746 .addDef(Call->ReturnRegister)
1747 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1748 .addUse(QueryResult)
1749 .addUse(QueryResult);
1750 for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
1751 MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
1752 }
1753 return true;
1754}
1755
1756static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
1757 MachineIRBuilder &MIRBuilder,
1758 SPIRVGlobalRegistry *GR) {
1759 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
1760 "Image samples query result must be of int type!");
1761
1762 // Lookup the instruction opcode in the TableGen records.
1763 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1764 unsigned Opcode =
1765 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1766
1767 Register Image = Call->Arguments[0];
1768 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
1769 GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
1770 (void)ImageDimensionality;
1771
1772 switch (Opcode) {
1773 case SPIRV::OpImageQuerySamples:
1774 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
1775 "Image must be of 2D dimensionality");
1776 break;
1777 case SPIRV::OpImageQueryLevels:
1778 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
1779 ImageDimensionality == SPIRV::Dim::DIM_2D ||
1780 ImageDimensionality == SPIRV::Dim::DIM_3D ||
1781 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
1782 "Image must be of 1D/2D/3D/Cube dimensionality");
1783 break;
1784 }
1785
1786 MIRBuilder.buildInstr(Opcode)
1787 .addDef(Call->ReturnRegister)
1788 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1789 .addUse(Image);
1790 return true;
1791}
1792
1793// TODO: Move to TableGen.
1794static SPIRV::SamplerAddressingMode::SamplerAddressingMode
1795getSamplerAddressingModeFromBitmask(unsigned Bitmask) {
1796 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
1797 case SPIRV::CLK_ADDRESS_CLAMP:
1798 return SPIRV::SamplerAddressingMode::Clamp;
1799 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
1800 return SPIRV::SamplerAddressingMode::ClampToEdge;
1801 case SPIRV::CLK_ADDRESS_REPEAT:
1802 return SPIRV::SamplerAddressingMode::Repeat;
1803 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
1804 return SPIRV::SamplerAddressingMode::RepeatMirrored;
1805 case SPIRV::CLK_ADDRESS_NONE:
1806 return SPIRV::SamplerAddressingMode::None;
1807 default:
1808 report_fatal_error("Unknown CL address mode");
1809 }
1810}
1811
1812static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
1813 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
1814}
1815
1816static SPIRV::SamplerFilterMode::SamplerFilterMode
1817getSamplerFilterModeFromBitmask(unsigned Bitmask) {
1818 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
1819 return SPIRV::SamplerFilterMode::Linear;
1820 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
1821 return SPIRV::SamplerFilterMode::Nearest;
1822 return SPIRV::SamplerFilterMode::Nearest;
1823}
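// Illustrative example: a literal sampler bitmask of
//   CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST
// decodes via the three helpers above to the OpConstantSampler operands
// (Clamp, 1, Nearest).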
1824
1825static bool generateReadImageInst(const StringRef DemangledCall,
1826 const SPIRV::IncomingCall *Call,
1827 MachineIRBuilder &MIRBuilder,
1828 SPIRVGlobalRegistry *GR) {
1829 Register Image = Call->Arguments[0];
1830 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1831 bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
1832 bool HasMsaa = DemangledCall.contains_insensitive("msaa");
1833 if (HasOclSampler) {
1834 Register Sampler = Call->Arguments[1];
1835
1836 if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
1837 getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
1838 uint64_t SamplerMask = getIConstVal(Sampler, MRI);
1839 Sampler = GR->buildConstantSampler(
1840 Register(), getSamplerAddressingModeFromBitmask(SamplerMask),
1841 getSamplerParamFromBitmask(SamplerMask),
1842 getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder,
1843 GR->getSPIRVTypeForVReg(Sampler));
1844 }
1845 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
1846 SPIRVType *SampledImageType =
1847 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
1848 Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1849
1850 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
1851 .addDef(SampledImage)
1852 .addUse(GR->getSPIRVTypeID(SampledImageType))
1853 .addUse(Image)
1854 .addUse(Sampler);
1855
1856 Register Lod = GR->buildConstantFP(APFloat::getZero(APFloat::IEEEsingle()),
1857 MIRBuilder);
1858
1859 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
1860 SPIRVType *TempType =
1861 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder);
1862 Register TempRegister =
1863 MRI->createGenericVirtualRegister(GR->getRegType(TempType));
1864 MRI->setRegClass(TempRegister, GR->getRegClass(TempType));
1865 GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
1866 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1867 .addDef(TempRegister)
1868 .addUse(GR->getSPIRVTypeID(TempType))
1869 .addUse(SampledImage)
1870 .addUse(Call->Arguments[2]) // Coordinate.
1871 .addImm(SPIRV::ImageOperand::Lod)
1872 .addUse(Lod);
1873 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1874 .addDef(Call->ReturnRegister)
1875 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1876 .addUse(TempRegister)
1877 .addImm(0);
1878 } else {
1879 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1880 .addDef(Call->ReturnRegister)
1881 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1882 .addUse(SampledImage)
1883 .addUse(Call->Arguments[2]) // Coordinate.
1884 .addImm(SPIRV::ImageOperand::Lod)
1885 .addUse(Lod);
1886 }
1887 } else if (HasMsaa) {
1888 MIRBuilder.buildInstr(SPIRV::OpImageRead)
1889 .addDef(Call->ReturnRegister)
1890 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1891 .addUse(Image)
1892 .addUse(Call->Arguments[1]) // Coordinate.
1893 .addImm(SPIRV::ImageOperand::Sample)
1894 .addUse(Call->Arguments[2]);
1895 } else {
1896 MIRBuilder.buildInstr(SPIRV::OpImageRead)
1897 .addDef(Call->ReturnRegister)
1898 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1899 .addUse(Image)
1900 .addUse(Call->Arguments[1]); // Coordinate.
1901 }
1902 return true;
1903}
1904
1905static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
1906 MachineIRBuilder &MIRBuilder,
1907 SPIRVGlobalRegistry *GR) {
1908 MIRBuilder.buildInstr(SPIRV::OpImageWrite)
1909 .addUse(Call->Arguments[0]) // Image.
1910 .addUse(Call->Arguments[1]) // Coordinate.
1911 .addUse(Call->Arguments[2]); // Texel.
1912 return true;
1913}
1914
1915static bool generateSampleImageInst(const StringRef DemangledCall,
1916 const SPIRV::IncomingCall *Call,
1917 MachineIRBuilder &MIRBuilder,
1918 SPIRVGlobalRegistry *GR) {
1919 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1920 if (Call->Builtin->Name.contains_insensitive(
1921 "__translate_sampler_initializer")) {
1922 // Build sampler literal.
1923 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
1924 Register Sampler = GR->buildConstantSampler(
1925 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
1926 getSamplerParamFromBitmask(Bitmask),
1927 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder, Call->ReturnType);
1928 return Sampler.isValid();
1929 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
1930 // Create OpSampledImage.
1931 Register Image = Call->Arguments[0];
1932 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
1933 SPIRVType *SampledImageType =
1934 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
1935 Register SampledImage =
1936 Call->ReturnRegister.isValid()
1937 ? Call->ReturnRegister
1938 : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1939 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
1940 .addDef(SampledImage)
1941 .addUse(GR->getSPIRVTypeID(SampledImageType))
1942 .addUse(Image)
1943 .addUse(Call->Arguments[1]); // Sampler.
1944 return true;
1945 } else if (Call->Builtin->Name.contains_insensitive(
1946 "__spirv_ImageSampleExplicitLod")) {
1947 // Sample an image using an explicit level of detail.
1948 std::string ReturnType = DemangledCall.str();
1949 if (DemangledCall.contains("_R")) {
1950 ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
1951 ReturnType = ReturnType.substr(0, ReturnType.find('('));
1952 }
1953 SPIRVType *Type =
1954 Call->ReturnType
1955 ? Call->ReturnType
1956 : GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder);
1957 if (!Type) {
1958 std::string DiagMsg =
1959 "Unable to recognize SPIRV type name: " + ReturnType;
1960 report_fatal_error(DiagMsg.c_str());
1961 }
1962 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1963 .addDef(Call->ReturnRegister)
1964 .addUse(GR->getSPIRVTypeID(Type))
1965 .addUse(Call->Arguments[0]) // Image.
1966 .addUse(Call->Arguments[1]) // Coordinate.
1967 .addImm(SPIRV::ImageOperand::Lod)
1968 .addUse(Call->Arguments[3]);
1969 return true;
1970 }
1971 return false;
1972}
1973
1974static bool generateSelectInst(const SPIRV::IncomingCall *Call,
1975 MachineIRBuilder &MIRBuilder) {
1976 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
1977 Call->Arguments[1], Call->Arguments[2]);
1978 return true;
1979}
1980
1981static bool generateConstructInst(const SPIRV::IncomingCall *Call,
1982 MachineIRBuilder &MIRBuilder,
1983 SPIRVGlobalRegistry *GR) {
1984 return buildOpFromWrapper(MIRBuilder, SPIRV::OpCompositeConstruct, Call,
1985 GR->getSPIRVTypeID(Call->ReturnType));
1986}
1987
1988static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call,
1989 MachineIRBuilder &MIRBuilder,
1990 SPIRVGlobalRegistry *GR) {
1991 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1992 unsigned Opcode =
1993 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1994 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
1995 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
1996 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
1997 unsigned ArgSz = Call->Arguments.size();
1998 unsigned LiteralIdx = 0;
1999 switch (Opcode) {
2000 // The memory operand is optional and is a literal.
2001 case SPIRV::OpCooperativeMatrixLoadKHR:
2002 LiteralIdx = ArgSz > 3 ? 3 : 0;
2003 break;
2004 case SPIRV::OpCooperativeMatrixStoreKHR:
2005 LiteralIdx = ArgSz > 4 ? 4 : 0;
2006 break;
2007 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2008 LiteralIdx = ArgSz > 7 ? 7 : 0;
2009 break;
2010 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2011 LiteralIdx = ArgSz > 8 ? 8 : 0;
2012 break;
2013 // The Cooperative Matrix Operands operand is optional and is a literal.
2014 case SPIRV::OpCooperativeMatrixMulAddKHR:
2015 LiteralIdx = ArgSz > 3 ? 3 : 0;
2016 break;
2017 }
2018
2019 SmallVector<uint32_t, 1> ImmArgs;
2020 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2021 if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2022 const uint32_t CacheLevel = getConstFromIntrinsic(Call->Arguments[3], MRI);
2023 auto MIB = MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2024 .addUse(Call->Arguments[0]) // pointer
2025 .addUse(Call->Arguments[1]) // rows
2026 .addUse(Call->Arguments[2]) // columns
2027 .addImm(CacheLevel) // cache level
2028 .addUse(Call->Arguments[4]); // memory layout
2029 if (ArgSz > 5)
2030 MIB.addUse(Call->Arguments[5]); // stride
2031 if (ArgSz > 6) {
2032 const uint32_t MemOp = getConstFromIntrinsic(Call->Arguments[6], MRI);
2033 MIB.addImm(MemOp); // memory operand
2034 }
2035 return true;
2036 }
2037 if (LiteralIdx > 0)
2038 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
2039 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2040 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2041 SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2042 if (!CoopMatrType)
2043 report_fatal_error("Can't find a register's type definition");
2044 MIRBuilder.buildInstr(Opcode)
2045 .addDef(Call->ReturnRegister)
2046 .addUse(TypeReg)
2047 .addUse(CoopMatrType->getOperand(0).getReg());
2048 return true;
2049 }
2050 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2051 IsSet ? TypeReg : Register(0), ImmArgs);
2052}
2053
2054static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
2055 MachineIRBuilder &MIRBuilder,
2056 SPIRVGlobalRegistry *GR) {
2057 // Lookup the instruction opcode in the TableGen records.
2058 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2059 unsigned Opcode =
2060 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2061 const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2062
2063 switch (Opcode) {
2064 case SPIRV::OpSpecConstant: {
2065 // Build the SpecID decoration.
2066 unsigned SpecId =
2067 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
2068 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
2069 {SpecId});
2070 // Determine the constant MI.
2071 Register ConstRegister = Call->Arguments[1];
2072 const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
2073 assert(Const &&
2074 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2075 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2076 "Argument should be either an int or floating-point constant");
2077 // Determine the opcode and build the OpSpec MI.
2078 const MachineOperand &ConstOperand = Const->getOperand(1);
2079 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2080 assert(ConstOperand.isCImm() && "Int constant operand is expected");
2081 Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
2082 ? SPIRV::OpSpecConstantTrue
2083 : SPIRV::OpSpecConstantFalse;
2084 }
2085 auto MIB = MIRBuilder.buildInstr(Opcode)
2086 .addDef(Call->ReturnRegister)
2087 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2088
2089 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2090 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2091 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
2092 else
2093 addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
2094 }
2095 return true;
2096 }
2097 case SPIRV::OpSpecConstantComposite: {
2098 auto MIB = MIRBuilder.buildInstr(Opcode)
2099 .addDef(Call->ReturnRegister)
2100 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2101 for (unsigned i = 0; i < Call->Arguments.size(); i++)
2102 MIB.addUse(Call->Arguments[i]);
2103 return true;
2104 }
2105 default:
2106 return false;
2107 }
2108}
2109
2110static bool buildNDRange(const SPIRV::IncomingCall *Call,
2111 MachineIRBuilder &MIRBuilder,
2112 SPIRVGlobalRegistry *GR) {
2113 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2114 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2115 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
2116 PtrType->getOperand(2).isReg());
2117 Register TypeReg = PtrType->getOperand(2).getReg();
2118 SPIRVType *StructType = GR->getSPIRVTypeForVReg(TypeReg);
2119 MachineFunction &MF = MIRBuilder.getMF();
2120 Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2121 GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
2122 // Skip the first argument; it is the destination pointer. OpBuildNDRange takes
2123 // three other arguments, so pass a zero constant for any that are absent.
2124 unsigned NumArgs = Call->Arguments.size();
2125 assert(NumArgs >= 2);
2126 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
2127 Register LocalWorkSize =
2128 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
2129 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
2130 if (NumArgs < 4) {
2131 Register Const;
2132 SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
2133 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
2134 MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
2135 assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
2136 DefInstr->getOperand(3).isReg());
2137 Register GWSPtr = DefInstr->getOperand(3).getReg();
2138 // TODO: Maybe simplify generation of the type of the fields.
2139 unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
2140 unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
2141 Type *BaseTy = IntegerType::get(MF.getFunction().getContext(), BitWidth);
2142 Type *FieldTy = ArrayType::get(BaseTy, Size);
2143 SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(FieldTy, MIRBuilder);
2144 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2145 GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
2146 MIRBuilder.buildInstr(SPIRV::OpLoad)
2147 .addDef(GlobalWorkSize)
2148 .addUse(GR->getSPIRVTypeID(SpvFieldTy))
2149 .addUse(GWSPtr);
2150 const SPIRVSubtarget &ST =
2151 cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
2152 Const = GR->getOrCreateConstIntArray(0, Size, *MIRBuilder.getInsertPt(),
2153 SpvFieldTy, *ST.getInstrInfo());
2154 } else {
2155 Const = GR->buildConstantInt(0, MIRBuilder, SpvTy);
2156 }
2157 if (!LocalWorkSize.isValid())
2158 LocalWorkSize = Const;
2159 if (!GlobalWorkOffset.isValid())
2160 GlobalWorkOffset = Const;
2161 }
2162 assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
2163 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2164 .addDef(TmpReg)
2165 .addUse(TypeReg)
2166 .addUse(GlobalWorkSize)
2167 .addUse(LocalWorkSize)
2168 .addUse(GlobalWorkOffset);
2169 return MIRBuilder.buildInstr(SPIRV::OpStore)
2170 .addUse(Call->Arguments[0])
2171 .addUse(TmpReg);
2172}
2173
2174// TODO: maybe move to the global registry.
2175static SPIRVType *
2176getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder,
2177 SPIRVGlobalRegistry *GR) {
2178 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
2179 Type *OpaqueType = StructType::getTypeByName(Context, "spirv.DeviceEvent");
2180 if (!OpaqueType)
2181 OpaqueType = StructType::getTypeByName(Context, "opencl.clk_event_t");
2182 if (!OpaqueType)
2183 OpaqueType = StructType::create(Context, "spirv.DeviceEvent");
2184 unsigned SC0 = storageClassToAddressSpace(SPIRV::StorageClass::Function);
2185 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2186 Type *PtrType = PointerType::get(PointerType::get(OpaqueType, SC0), SC1);
2187 return GR->getOrCreateSPIRVType(PtrType, MIRBuilder);
2188}
2189
2190static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
2191 MachineIRBuilder &MIRBuilder,
2192 SPIRVGlobalRegistry *GR) {
2193 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2194 const DataLayout &DL = MIRBuilder.getDataLayout();
2195 bool IsSpirvOp = Call->isSpirvOp();
2196 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
2197 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
2198
2199 // Build the vararg instructions before OpEnqueueKernel.
2200 // Local size arguments: sizes of the block invoke arguments. Clang generates
2201 // local size operands as an array, so we need to unpack them.
2202 SmallVector<Register, 16> LocalSizes;
2203 if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
2204 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2205 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2206 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
2207 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
2208 GepMI->getOperand(3).isReg());
2209 Register ArrayReg = GepMI->getOperand(3).getReg();
2210 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
2211 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
2212 assert(LocalSizeTy && "Local size type is expected");
2213 const uint64_t LocalSizeNum =
2214 cast<ArrayType>(LocalSizeTy)->getNumElements();
2215 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2216 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
2217 const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
2218 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2219 for (unsigned I = 0; I < LocalSizeNum; ++I) {
2220 Register Reg = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2221 MRI->setType(Reg, LLType);
2222 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
2223 auto GEPInst = MIRBuilder.buildIntrinsic(
2224 Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
2225 GEPInst
2226 .addImm(GepMI->getOperand(2).getImm()) // In bound.
2227 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
2228 .addUse(buildConstantIntReg32(0, MIRBuilder, GR)) // Indices.
2229 .addUse(buildConstantIntReg32(I, MIRBuilder, GR));
2230 LocalSizes.push_back(Reg);
2231 }
2232 }
2233
2234 // The SPIR-V OpEnqueueKernel instruction has 10+ arguments.
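// Per the SPIR-V specification, after the result id and type the operands
// are: Queue, Flags, ND Range, Num Events, Wait Events, Ret Event, Invoke,
// Param, Param Size and Param Align, followed by optional Local Size operands.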
2235 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
2236 .addDef(Call->ReturnRegister)
2237 .addUse(GR->getSPIRVTypeID(Int32Ty));
2238
2239 // Copy all arguments before the block invoke function pointer.
2240 const unsigned BlockFIdx = HasEvents ? 6 : 3;
2241 for (unsigned i = 0; i < BlockFIdx; i++)
2242 MIB.addUse(Call->Arguments[i]);
2243
2244 // If there are no event arguments in the original call, add dummy ones.
2245 if (!HasEvents) {
2246 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Dummy num events.
2247 Register NullPtr = GR->getOrCreateConstNullPtr(
2248 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
2249 MIB.addUse(NullPtr); // Dummy wait events.
2250 MIB.addUse(NullPtr); // Dummy ret event.
2251 }
2252
2253 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
2254 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
2255 // Invoke: Pointer to invoke function.
2256 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
2257
2258 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2259 // Param: Pointer to block literal.
2260 MIB.addUse(BlockLiteralReg);
2261
2262 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
2263 // TODO: these numbers should be obtained from the block literal structure.
2264 // Param Size: Size of block literal structure.
2265 MIB.addUse(buildConstantIntReg32(DL.getTypeStoreSize(PType), MIRBuilder, GR));
2266 // Param Alignment: Alignment of the block literal structure.
2267 MIB.addUse(buildConstantIntReg32(DL.getPrefTypeAlign(PType).value(),
2268 MIRBuilder, GR));
2269
2270 for (unsigned i = 0; i < LocalSizes.size(); i++)
2271 MIB.addUse(LocalSizes[i]);
2272 return true;
2273}
2274
2275static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
2276 MachineIRBuilder &MIRBuilder,
2277 SPIRVGlobalRegistry *GR) {
2278 // Lookup the instruction opcode in the TableGen records.
2279 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2280 unsigned Opcode =
2281 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2282
2283 switch (Opcode) {
2284 case SPIRV::OpRetainEvent:
2285 case SPIRV::OpReleaseEvent:
2286 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
2287 case SPIRV::OpCreateUserEvent:
2288 case SPIRV::OpGetDefaultQueue:
2289 return MIRBuilder.buildInstr(Opcode)
2290 .addDef(Call->ReturnRegister)
2291 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2292 case SPIRV::OpIsValidEvent:
2293 return MIRBuilder.buildInstr(Opcode)
2294 .addDef(Call->ReturnRegister)
2295 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2296 .addUse(Call->Arguments[0]);
2297 case SPIRV::OpSetUserEventStatus:
2298 return MIRBuilder.buildInstr(Opcode)
2299 .addUse(Call->Arguments[0])
2300 .addUse(Call->Arguments[1]);
2301 case SPIRV::OpCaptureEventProfilingInfo:
2302 return MIRBuilder.buildInstr(Opcode)
2303 .addUse(Call->Arguments[0])
2304 .addUse(Call->Arguments[1])
2305 .addUse(Call->Arguments[2]);
2306 case SPIRV::OpBuildNDRange:
2307 return buildNDRange(Call, MIRBuilder, GR);
2308 case SPIRV::OpEnqueueKernel:
2309 return buildEnqueueKernel(Call, MIRBuilder, GR);
2310 default:
2311 return false;
2312 }
2313}
2314
2315static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
2316 MachineIRBuilder &MIRBuilder,
2317 SPIRVGlobalRegistry *GR) {
2318 // Lookup the instruction opcode in the TableGen records.
2319 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2320 unsigned Opcode =
2321 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2322
2323 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2324 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2325 if (Call->isSpirvOp())
2326 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2327 IsSet ? TypeReg : Register(0));
2328
2329 auto Scope = buildConstantIntReg32(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2330
2331 switch (Opcode) {
2332 case SPIRV::OpGroupAsyncCopy: {
2333 SPIRVType *NewType =
2334 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2335 ? nullptr
2336 : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder);
2337 Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
2338 unsigned NumArgs = Call->Arguments.size();
2339 Register EventReg = Call->Arguments[NumArgs - 1];
2340 bool Res = MIRBuilder.buildInstr(Opcode)
2341 .addDef(Call->ReturnRegister)
2342 .addUse(TypeReg)
2343 .addUse(Scope)
2344 .addUse(Call->Arguments[0])
2345 .addUse(Call->Arguments[1])
2346 .addUse(Call->Arguments[2])
2347 .addUse(Call->Arguments.size() > 4
2348 ? Call->Arguments[3]
2349 : buildConstantIntReg32(1, MIRBuilder, GR))
2350 .addUse(EventReg);
2351 if (NewType != nullptr)
2352 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2353 MIRBuilder.getMF().getRegInfo());
2354 return Res;
2355 }
2356 case SPIRV::OpGroupWaitEvents:
2357 return MIRBuilder.buildInstr(Opcode)
2358 .addUse(Scope)
2359 .addUse(Call->Arguments[0])
2360 .addUse(Call->Arguments[1]);
2361 default:
2362 return false;
2363 }
2364}
2365
2366static bool generateConvertInst(const StringRef DemangledCall,
2367 const SPIRV::IncomingCall *Call,
2368 MachineIRBuilder &MIRBuilder,
2369 SPIRVGlobalRegistry *GR) {
2370 // Lookup the conversion builtin in the TableGen records.
2371 const SPIRV::ConvertBuiltin *Builtin =
2372 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2373
2374 if (!Builtin && Call->isSpirvOp()) {
2375 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2376 unsigned Opcode =
2377 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2378 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2379 GR->getSPIRVTypeID(Call->ReturnType));
2380 }
2381
2382 if (Builtin->IsSaturated)
2383 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2384 SPIRV::Decoration::SaturatedConversion, {});
2385 if (Builtin->IsRounded)
2386 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2387 SPIRV::Decoration::FPRoundingMode,
2388 {(unsigned)Builtin->RoundingMode});
2389
2390 std::string NeedExtMsg; // no errors if empty
2391 bool IsRightComponentsNumber = true; // check if input/output accepts vectors
2392 unsigned Opcode = SPIRV::OpNop;
2393 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2394 // Int -> ...
2395 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2396 // Int -> Int
2397 if (Builtin->IsSaturated)
2398 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2399 : SPIRV::OpSatConvertSToU;
2400 else
2401 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2402 : SPIRV::OpSConvert;
2403 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2404 SPIRV::OpTypeFloat)) {
2405 // Int -> Float
2406 if (Builtin->IsBfloat16) {
2407 const auto *ST = static_cast<const SPIRVSubtarget *>(
2408 &MIRBuilder.getMF().getSubtarget());
2409 if (!ST->canUseExtension(
2410 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2411 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2412 IsRightComponentsNumber =
2413 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2414 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2415 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2416 } else {
2417 bool IsSourceSigned =
2418 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2419 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
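// Illustrative example: "convert_float(int)" selects OpConvertSToF, while
// "convert_float(uint)" selects OpConvertUToF.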
2420 }
2421 }
2422 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2423 SPIRV::OpTypeFloat)) {
2424 // Float -> ...
2425 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2426 // Float -> Int
2427 if (Builtin->IsBfloat16) {
2428 const auto *ST = static_cast<const SPIRVSubtarget *>(
2429 &MIRBuilder.getMF().getSubtarget());
2430 if (!ST->canUseExtension(
2431 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2432 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2433 IsRightComponentsNumber =
2434 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2435 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2436 Opcode = SPIRV::OpConvertFToBF16INTEL;
2437 } else {
2438 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2439 : SPIRV::OpConvertFToU;
2440 }
2441 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2442 SPIRV::OpTypeFloat)) {
2443 // Float -> Float
2444 Opcode = SPIRV::OpFConvert;
2445 }
2446 }
2447
2448 if (!NeedExtMsg.empty()) {
2449 std::string DiagMsg = std::string(Builtin->Name) +
2450 ": the builtin requires the following SPIR-V "
2451 "extension: " +
2452 NeedExtMsg;
2453 report_fatal_error(DiagMsg.c_str(), false);
2454 }
2455 if (!IsRightComponentsNumber) {
2456 std::string DiagMsg =
2457 std::string(Builtin->Name) +
2458 ": result and argument must have the same number of components";
2459 report_fatal_error(DiagMsg.c_str(), false);
2460 }
2461 assert(Opcode != SPIRV::OpNop &&
2462 "Conversion between the types not implemented!");
2463
2464 MIRBuilder.buildInstr(Opcode)
2465 .addDef(Call->ReturnRegister)
2466 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2467 .addUse(Call->Arguments[0]);
2468 return true;
2469}
2470
2471static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
2472 MachineIRBuilder &MIRBuilder,
2473 SPIRVGlobalRegistry *GR) {
2474 // Lookup the vector load/store builtin in the TableGen records.
2475 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2476 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2477 Call->Builtin->Set);
2478 // Build extended instruction.
2479 auto MIB =
2480 MIRBuilder.buildInstr(SPIRV::OpExtInst)
2481 .addDef(Call->ReturnRegister)
2482 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2483 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2484 .addImm(Builtin->Number);
2485 for (auto Argument : Call->Arguments)
2486 MIB.addUse(Argument);
2487 if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
2488 MIB.addImm(Builtin->ElementCount);
2489
2490 // The rounding mode should be passed as the last argument in the MI for
2491 // builtins like "vstorea_halfn_r".
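// Illustrative example: "vloada_half4" appends the element count 4, while
// "vstorea_half4_rtz" appends the RTZ rounding-mode literal instead.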
2492 if (Builtin->IsRounded)
2493 MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
2494 return true;
2495}
2496
2497static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
2498 MachineIRBuilder &MIRBuilder,
2499 SPIRVGlobalRegistry *GR) {
2500 // Lookup the instruction opcode in the TableGen records.
2501 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2502 unsigned Opcode =
2503 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2504 bool IsLoad = Opcode == SPIRV::OpLoad;
2505 // Build the instruction.
2506 auto MIB = MIRBuilder.buildInstr(Opcode);
2507 if (IsLoad) {
2508 MIB.addDef(Call->ReturnRegister);
2509 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
2510 }
2511 // Add a pointer to the value to load/store.
2512 MIB.addUse(Call->Arguments[0]);
2513 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2514 // Add a value to store.
2515 if (!IsLoad)
2516 MIB.addUse(Call->Arguments[1]);
2517 // Add optional memory attributes and an alignment.
2518 unsigned NumArgs = Call->Arguments.size();
2519 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
2520 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
2521 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
2522 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
2523 return true;
2524}
2525
2526namespace SPIRV {
2527// Try to find a builtin function's attributes by its demangled name and
2528// return a tuple <builtin group, op code, ext instruction number>, or a special
2529// tuple value <-1, 0, 0> if the builtin function is not found.
2530// Not all builtin functions are supported, only those with a ready-to-use op
2531// code or instruction number defined in TableGen.
2532// TODO: consider a major rework of mapping demangled calls to builtin
2533// functions to unify the search and decrease the number of individual cases.
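// Illustrative example: for an OpenCL "atomic_load" call this returns
// <SPIRV::Atomic, SPIRV::OpAtomicLoad, 0>, and <-1, 0, 0> for an unknown
// builtin name.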
2534std::tuple<int, unsigned, unsigned>
2535mapBuiltinToOpcode(const StringRef DemangledCall,
2536 SPIRV::InstructionSet::InstructionSet Set) {
2537 Register Reg;
2538 SmallVector<Register> Args;
2539 std::unique_ptr<const IncomingCall> Call =
2540 lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
2541 if (!Call)
2542 return std::make_tuple(-1, 0, 0);
2543
2544 switch (Call->Builtin->Group) {
2545 case SPIRV::Relational:
2546 case SPIRV::Atomic:
2547 case SPIRV::Barrier:
2548 case SPIRV::CastToPtr:
2549 case SPIRV::ImageMiscQuery:
2550 case SPIRV::SpecConstant:
2551 case SPIRV::Enqueue:
2552 case SPIRV::AsyncCopy:
2553 case SPIRV::LoadStore:
2554 case SPIRV::CoopMatr:
2555 if (const auto *R =
2556 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
2557 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2558 break;
2559 case SPIRV::Extended:
2560 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
2561 Call->Builtin->Set))
2562 return std::make_tuple(Call->Builtin->Group, 0, R->Number);
2563 break;
2564 case SPIRV::VectorLoadStore:
2565 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2566 Call->Builtin->Set))
2567 return std::make_tuple(SPIRV::Extended, 0, R->Number);
2568 break;
2569 case SPIRV::Group:
2570 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
2571 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2572 break;
2573 case SPIRV::AtomicFloating:
2574 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
2575 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2576 break;
2577 case SPIRV::IntelSubgroups:
2578 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
2579 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2580 break;
2581 case SPIRV::GroupUniform:
2582 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
2583 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2584 break;
2585 case SPIRV::WriteImage:
2586 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
2587 case SPIRV::Select:
2588 return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
2589 case SPIRV::Construct:
2590 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
2591 0);
2592 case SPIRV::KernelClock:
2593 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
2594 default:
2595 return std::make_tuple(-1, 0, 0);
2596 }
2597 return std::make_tuple(-1, 0, 0);
2598}
2599
2600std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
2601 SPIRV::InstructionSet::InstructionSet Set,
2602 MachineIRBuilder &MIRBuilder,
2603 const Register OrigRet, const Type *OrigRetTy,
2604 const SmallVectorImpl<Register> &Args,
2605 SPIRVGlobalRegistry *GR) {
2606 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
2607
2608 // Lookup the builtin in the TableGen records.
2609 SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OrigRet);
2610 assert(SpvType && "Inconsistent return register: expected valid type info");
2611 std::unique_ptr<const IncomingCall> Call =
2612 lookupBuiltin(DemangledCall, Set, OrigRet, SpvType, Args);
2613
2614 if (!Call) {
2615 LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
2616 return std::nullopt;
2617 }
2618
2619 // TODO: check if the provided args meet the builtin requirements.
2620 assert(Args.size() >= Call->Builtin->MinNumArgs &&
2621 "Too few arguments to generate the builtin");
2622 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
2623 LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");
2624
2625 // Match the builtin with implementation based on the grouping.
2626 switch (Call->Builtin->Group) {
2627 case SPIRV::Extended:
2628 return generateExtInst(Call.get(), MIRBuilder, GR);
2629 case SPIRV::Relational:
2630 return generateRelationalInst(Call.get(), MIRBuilder, GR);
2631 case SPIRV::Group:
2632 return generateGroupInst(Call.get(), MIRBuilder, GR);
2633 case SPIRV::Variable:
2634 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
2635 case SPIRV::Atomic:
2636 return generateAtomicInst(Call.get(), MIRBuilder, GR);
2637 case SPIRV::AtomicFloating:
2638 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
2639 case SPIRV::Barrier:
2640 return generateBarrierInst(Call.get(), MIRBuilder, GR);
2641 case SPIRV::CastToPtr:
2642 return generateCastToPtrInst(Call.get(), MIRBuilder);
2643 case SPIRV::Dot:
2644 return generateDotOrFMulInst(Call.get(), MIRBuilder, GR);
2645 case SPIRV::Wave:
2646 return generateWaveInst(Call.get(), MIRBuilder, GR);
2647 case SPIRV::ICarryBorrow:
2648 return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
2649 case SPIRV::GetQuery:
2650 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
2651 case SPIRV::ImageSizeQuery:
2652 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
2653 case SPIRV::ImageMiscQuery:
2654 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
2655 case SPIRV::ReadImage:
2656 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2657 case SPIRV::WriteImage:
2658 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
2659 case SPIRV::SampleImage:
2660 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2661 case SPIRV::Select:
2662 return generateSelectInst(Call.get(), MIRBuilder);
2663 case SPIRV::Construct:
2664 return generateConstructInst(Call.get(), MIRBuilder, GR);
2665 case SPIRV::SpecConstant:
2666 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
2667 case SPIRV::Enqueue:
2668 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
2669 case SPIRV::AsyncCopy:
2670 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
2671 case SPIRV::Convert:
2672 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
2673 case SPIRV::VectorLoadStore:
2674 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
2675 case SPIRV::LoadStore:
2676 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
2677 case SPIRV::IntelSubgroups:
2678 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
2679 case SPIRV::GroupUniform:
2680 return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
2681 case SPIRV::KernelClock:
2682 return generateKernelClockInst(Call.get(), MIRBuilder, GR);
2683 case SPIRV::CoopMatr:
2684 return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
2685 }
2686 return false;
2687}
2688
2689Type *parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx) {
2690 // Parse strings representing OpenCL builtin types.
2691 if (hasBuiltinTypePrefix(TypeStr)) {
2692 // OpenCL builtin types in demangled call strings have the following format:
2693 // e.g. ocl_image2d_ro
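// Illustrative example: "ocl_image2d_ro" is looked up below as
// "opencl.image2d_ro_t" and rebuilt as the corresponding target extension
// type.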
2694 [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
2695 assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");
2696
2697 // Check if this is a pointer to a builtin type and not just a pointer
2698 // representing a builtin type. If it is a pointer to a builtin type, this
2699 // will require additional handling in the method calling
2700 // parseBuiltinCallArgumentBaseType(...), as this function only retrieves the
2701 // base types.
2702 if (TypeStr.ends_with("*"))
2703 TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));
2704
2705 return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
2706 Ctx);
2707 }
2708
2709 // Parse type name in either "typeN" or "type vector[N]" format, where
2710 // N is the number of elements of the vector.
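// Illustrative examples: "float4" and "float vector[4]" both denote a
// 4-element float vector; a trailing "*" is stripped so only the base type
// is returned.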
2711 Type *BaseType;
2712 unsigned VecElts = 0;
2713
2714 BaseType = parseBasicTypeName(TypeStr, Ctx);
2715 if (!BaseType)
2716 // Unable to recognize SPIRV type name.
2717 return nullptr;
2718
2719 // Handle "typeN*" or "type vector[N]*".
2720 TypeStr.consume_back("*");
2721
2722 if (TypeStr.consume_front(" vector["))
2723 TypeStr = TypeStr.substr(0, TypeStr.find(']'));
2724
2725 TypeStr.getAsInteger(10, VecElts);
2726 if (VecElts > 0)
2727 BaseType = VectorType::get(
2728 BaseType->isVoidTy() ? Type::getInt8Ty(Ctx) : BaseType, VecElts, false);
2729
2730 return BaseType;
2731}
2732
2733bool parseBuiltinTypeStr(SmallVector<StringRef, 10> &BuiltinArgsTypeStrs,
2734 const StringRef DemangledCall, LLVMContext &Ctx) {
2735 auto Pos1 = DemangledCall.find('(');
2736 if (Pos1 == StringRef::npos)
2737 return false;
2738 auto Pos2 = DemangledCall.find(')');
2739 if (Pos2 == StringRef::npos || Pos1 > Pos2)
2740 return false;
2741 DemangledCall.slice(Pos1 + 1, Pos2)
2742 .split(BuiltinArgsTypeStrs, ',', -1, false);
2743 return true;
2744}
2745
2746Type *parseBuiltinCallArgumentBaseType(const StringRef DemangledCall,
2747 unsigned ArgIdx, LLVMContext &Ctx) {
2748 SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
2749 parseBuiltinTypeStr(BuiltinArgsTypeStrs, DemangledCall, Ctx);
2750 if (ArgIdx >= BuiltinArgsTypeStrs.size())
2751 return nullptr;
2752 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
2753 return parseBuiltinCallArgumentType(TypeStr, Ctx);
2754}
2755
2756struct BuiltinType {
2757 StringRef Name;
2758 uint32_t Opcode;
2759};
2760
2761#define GET_BuiltinTypes_DECL
2762#define GET_BuiltinTypes_IMPL
2763
2764struct OpenCLType {
2765 StringRef Name;
2766 StringRef SpirvTypeLiteral;
2767};
2768
2769#define GET_OpenCLTypes_DECL
2770#define GET_OpenCLTypes_IMPL
2771
2772#include "SPIRVGenTables.inc"
2773} // namespace SPIRV
2774
2775//===----------------------------------------------------------------------===//
2776// Misc functions for parsing builtin types.
2777//===----------------------------------------------------------------------===//
2778
2779static Type *parseTypeString(const StringRef Name, LLVMContext &Context) {
2780 if (Name.starts_with("void"))
2781 return Type::getVoidTy(Context);
2782 else if (Name.starts_with("int") || Name.starts_with("uint"))
2783 return Type::getInt32Ty(Context);
2784 else if (Name.starts_with("float"))
2785 return Type::getFloatTy(Context);
2786 else if (Name.starts_with("half"))
2787 return Type::getHalfTy(Context);
2788 report_fatal_error("Unable to recognize type!");
2789}
2790
2791//===----------------------------------------------------------------------===//
2792// Implementation functions for builtin types.
2793//===----------------------------------------------------------------------===//
2794
2795static SPIRVType *getNonParameterizedType(const TargetExtType *ExtensionType,
2796 const SPIRV::BuiltinType *TypeRecord,
2797 MachineIRBuilder &MIRBuilder,
2798 SPIRVGlobalRegistry *GR) {
2799 unsigned Opcode = TypeRecord->Opcode;
2800 // Create or get an existing type from GlobalRegistry.
2801 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
2802}
2803
2804static SPIRVType *getSamplerType(MachineIRBuilder &MIRBuilder,
2805 SPIRVGlobalRegistry *GR) {
2806 // Create or get an existing type from GlobalRegistry.
2807 return GR->getOrCreateOpTypeSampler(MIRBuilder);
2808}
2809
2810static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
2811 MachineIRBuilder &MIRBuilder,
2812 SPIRVGlobalRegistry *GR) {
2813 assert(ExtensionType->getNumIntParameters() == 1 &&
2814 "Invalid number of parameters for SPIR-V pipe builtin!");
2815 // Create or get an existing type from GlobalRegistry.
2816 return GR->getOrCreateOpTypePipe(MIRBuilder,
2817 SPIRV::AccessQualifier::AccessQualifier(
2818 ExtensionType->getIntParameter(0)));
2819}
2820
2821static SPIRVType *getCoopMatrType(const TargetExtType *ExtensionType,
2822 MachineIRBuilder &MIRBuilder,
2823 SPIRVGlobalRegistry *GR) {
2824 assert(ExtensionType->getNumIntParameters() == 4 &&
2825 "Invalid number of parameters for SPIR-V coop matrices builtin!");
2826 assert(ExtensionType->getNumTypeParameters() == 1 &&
2827 "SPIR-V coop matrices builtin type must have a type parameter!");
2828 const SPIRVType *ElemType =
2829 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder);
2830 // Create or get an existing type from GlobalRegistry.
2831 return GR->getOrCreateOpTypeCoopMatr(
2832 MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
2833 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
2834 ExtensionType->getIntParameter(3));
2835}
2836
2837static SPIRVType *
2838getImageType(const TargetExtType *ExtensionType,
2839 const SPIRV::AccessQualifier::AccessQualifier Qualifier,
2840 MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
2841 assert(ExtensionType->getNumTypeParameters() == 1 &&
2842 "SPIR-V image builtin type must have sampled type parameter!");
2843 const SPIRVType *SampledType =
2844 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder);
2845 assert((ExtensionType->getNumIntParameters() == 7 ||
2846 ExtensionType->getNumIntParameters() == 6) &&
2847 "Invalid number of parameters for SPIR-V image builtin!");
2848
2849 SPIRV::AccessQualifier::AccessQualifier accessQualifier =
2850 SPIRV::AccessQualifier::None;
2851 if (ExtensionType->getNumIntParameters() == 7) {
2852 accessQualifier = Qualifier == SPIRV::AccessQualifier::WriteOnly
2853 ? SPIRV::AccessQualifier::WriteOnly
2854 : SPIRV::AccessQualifier::AccessQualifier(
2855 ExtensionType->getIntParameter(6));
2856 }
2857
2858 // Create or get an existing type from GlobalRegistry.
2859 return GR->getOrCreateOpTypeImage(
2860 MIRBuilder, SampledType,
2861 SPIRV::Dim::Dim(ExtensionType->getIntParameter(0)),
2862 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
2863 ExtensionType->getIntParameter(3), ExtensionType->getIntParameter(4),
2864 SPIRV::ImageFormat::ImageFormat(ExtensionType->getIntParameter(5)),
2865 accessQualifier);
2866}
2867
2868static SPIRVType *getSampledImageType(const TargetExtType *OpaqueType,
2869 MachineIRBuilder &MIRBuilder,
2870 SPIRVGlobalRegistry *GR) {
2871 SPIRVType *OpaqueImageType = getImageType(
2872 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder, GR);
2873 // Create or get an existing type from GlobalRegistry.
2874 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
2875}
2876
2877namespace SPIRV {
2878TargetExtType *parseBuiltinTypeNameToTargetExtType(std::string TypeName,
2879 LLVMContext &Context) {
2880 StringRef NameWithParameters = TypeName;
2881
2882 // Pointers-to-opaque-structs representing OpenCL types are first translated
2883 // to equivalent SPIR-V types. OpenCL builtin type names should have the
2884 // following format: e.g. %opencl.event_t
2885 if (NameWithParameters.starts_with("opencl.")) {
2886 const SPIRV::OpenCLType *OCLTypeRecord =
2887 SPIRV::lookupOpenCLType(NameWithParameters);
2888 if (!OCLTypeRecord)
2889 report_fatal_error("Missing TableGen record for OpenCL type: " +
2890 NameWithParameters);
2891 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
2892 // Continue with the SPIR-V builtin type...
2893 }
2894
2895 // Names of the opaque structs representing SPIR-V builtins without
2896 // parameters should have the following format: e.g. %spirv.Event
2897 assert(NameWithParameters.starts_with("spirv.") &&
2898 "Unknown builtin opaque type!");
2899
2900 // Parameterized SPIR-V builtin names follow this format:
2901 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
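// Illustrative example: "spirv.Image._void_1_0_0_0_0_0_0" splits into the
// base name "spirv.Image", one type parameter (void) and seven integer
// literals (Dim, Depth, Arrayed, MS, Sampled, Image Format and Access
// Qualifier).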
2902 if (!NameWithParameters.contains('_'))
2903 return TargetExtType::get(Context, NameWithParameters);
2904
2905 SmallVector<StringRef> Parameters;
2906 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
2907 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
2908
2909 SmallVector<Type *, 1> TypeParameters;
2910 bool HasTypeParameter = !isDigit(Parameters[0][0]);
2911 if (HasTypeParameter)
2912 TypeParameters.push_back(parseTypeString(Parameters[0], Context));
2913 SmallVector<unsigned> IntParameters;
2914 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
2915 unsigned IntParameter = 0;
2916 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
2917 (void)ValidLiteral;
2918 assert(ValidLiteral &&
2919 "Invalid format of SPIR-V builtin parameter literal!");
2920 IntParameters.push_back(IntParameter);
2921 }
2922 return TargetExtType::get(Context,
2923 NameWithParameters.substr(0, BaseNameLength),
2924 TypeParameters, IntParameters);
2925}
2926
2927SPIRVType *lowerBuiltinType(const Type *OpaqueType,
2928 SPIRV::AccessQualifier::AccessQualifier AccessQual,
2929 MachineIRBuilder &MIRBuilder,
2930 SPIRVGlobalRegistry *GR) {
2931 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
2932 // target(...) target extension types or pointers-to-opaque-structs. The
2933 // approach relying on structs is deprecated and works only in the non-opaque
2934 // pointer mode (-opaque-pointers=0).
2935 // In order to maintain compatibility with LLVM IR generated by older versions
2936 // of Clang and LLVM/SPIR-V Translator, the pointers-to-opaque-structs are
2937 // "translated" to target extension types. This translation is temporary and
2938 // will be removed in a future release of LLVM.
2939 const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
2940 if (!BuiltinType)
2941 BuiltinType = parseBuiltinTypeNameToTargetExtType(
2942 OpaqueType->getStructName().str(), MIRBuilder.getContext());
2943
2944 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
2945
2946 const StringRef Name = BuiltinType->getName();
2947 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
2948
2949 // Lookup the demangled builtin type in the TableGen records.
2950 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
2951 if (!TypeRecord)
2952 report_fatal_error("Missing TableGen record for builtin type: " + Name);
2953
2954 // "Lower" the BuiltinType into TargetType. The following get<...>Type methods
2955 // use the implementation details from TableGen records or TargetExtType
2956 // parameters to either create a new OpType<...> machine instruction or get an
2957 // existing equivalent SPIRVType from GlobalRegistry.
2958 SPIRVType *TargetType;
2959 switch (TypeRecord->Opcode) {
2960 case SPIRV::OpTypeImage:
2961 TargetType = getImageType(BuiltinType, AccessQual, MIRBuilder, GR);
2962 break;
2963 case SPIRV::OpTypePipe:
2964 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
2965 break;
2966 case SPIRV::OpTypeDeviceEvent:
2967 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
2968 break;
2969 case SPIRV::OpTypeSampler:
2970 TargetType = getSamplerType(MIRBuilder, GR);
2971 break;
2972 case SPIRV::OpTypeSampledImage:
2973 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
2974 break;
2975 case SPIRV::OpTypeCooperativeMatrixKHR:
2976 TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
2977 break;
2978 default:
2979 TargetType =
2980 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
2981 break;
2982 }
2983
2984 // Emit OpName instruction if a new OpType<...> instruction was added
2985 // (equivalent type was not found in GlobalRegistry).
2986 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
2987 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
2988
2989 return TargetType;
2990}
2991} // namespace SPIRV
2992} // namespace llvm
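For context, a minimal sketch of a call site for lowerBuiltinType follows; the helper name lowerIfBuiltin and the assumption that the caller already holds a MachineIRBuilder and SPIRVGlobalRegistry are illustrative only (the actual callers live in the SPIR-V call-lowering code):

  #include "SPIRVBuiltins.h"
  #include "SPIRVGlobalRegistry.h"
  #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

  // Hypothetical helper: lower an IR-level builtin type with the read-write
  // access qualifier, the same default the global registry uses.
  static llvm::SPIRVType *lowerIfBuiltin(const llvm::Type *Ty,
                                         llvm::MachineIRBuilder &MIRBuilder,
                                         llvm::SPIRVGlobalRegistry *GR) {
    return llvm::SPIRV::lowerBuiltinType(
        Ty, llvm::SPIRV::AccessQualifier::ReadWrite, MIRBuilder, GR);
  }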