184#include "llvm/IR/IntrinsicsAArch64.h"
185#include "llvm/IR/IntrinsicsX86.h"
215#define DEBUG_TYPE "msan"
218 "Controls which checks to insert");
221 "Controls which instruction to instrument");
239 "msan-track-origins",
244 cl::desc(
"keep going after reporting a UMR"),
253 "msan-poison-stack-with-call",
258 "msan-poison-stack-pattern",
259 cl::desc(
"poison uninitialized stack variables with the given pattern"),
264 cl::desc(
"Print name of local stack variable"),
273 cl::desc(
"propagate shadow through ICmpEQ and ICmpNE"),
278 cl::desc(
"exact handling of relational integer ICmp"),
282 "msan-handle-lifetime-intrinsics",
284 "when possible, poison scoped variables at the beginning of the scope "
285 "(slower, but more precise)"),
296 "msan-handle-asm-conservative",
307 "msan-check-access-address",
308 cl::desc(
"report accesses through a pointer which has poisoned shadow"),
313 cl::desc(
"check arguments and return values at function call boundaries"),
317 "msan-dump-strict-instructions",
318 cl::desc(
"print out instructions with default strict semantics"),
322 "msan-dump-strict-intrinsics",
323 cl::desc(
"Prints 'unknown' intrinsics that were handled heuristically. "
324 "Use -msan-dump-strict-instructions to print intrinsics that "
325 "could not be handled exactly nor heuristically."),
329 "msan-instrumentation-with-call-threshold",
331 "If the function being instrumented requires more than "
332 "this number of checks and origin stores, use callbacks instead of "
333 "inline checks (-1 means never use callbacks)."),
338 cl::desc(
"Enable KernelMemorySanitizer instrumentation"),
348 cl::desc(
"Insert checks for constant shadow values"),
355 cl::desc(
"Place MSan constructors in comdat sections"),
361 cl::desc(
"Define custom MSan AndMask"),
365 cl::desc(
"Define custom MSan XorMask"),
369 cl::desc(
"Define custom MSan ShadowBase"),
373 cl::desc(
"Define custom MSan OriginBase"),
378 cl::desc(
"Define threshold for number of checks per "
379 "debug location to force origin update."),
391struct MemoryMapParams {
398struct PlatformMemoryMapParams {
399 const MemoryMapParams *bits32;
400 const MemoryMapParams *bits64;
562class MemorySanitizer {
571 MemorySanitizer(MemorySanitizer &&) =
delete;
572 MemorySanitizer &operator=(MemorySanitizer &&) =
delete;
573 MemorySanitizer(
const MemorySanitizer &) =
delete;
574 MemorySanitizer &operator=(
const MemorySanitizer &) =
delete;
579 friend struct MemorySanitizerVisitor;
580 friend struct VarArgHelperBase;
581 friend struct VarArgAMD64Helper;
582 friend struct VarArgAArch64Helper;
583 friend struct VarArgPowerPCHelper;
584 friend struct VarArgSystemZHelper;
585 friend struct VarArgI386Helper;
586 friend struct VarArgGenericHelper;
588 void initializeModule(
Module &M);
593 template <
typename... ArgsTy>
620 Value *ParamOriginTLS;
626 Value *RetvalOriginTLS;
632 Value *VAArgOriginTLS;
635 Value *VAArgOverflowSizeTLS;
638 bool CallbacksInitialized =
false;
683 Value *MsanMetadataAlloca;
689 const MemoryMapParams *MapParams;
693 MemoryMapParams CustomMapParams;
698 MDNode *OriginStoreWeights;
701void insertModuleCtor(
Module &M) {
729 Recover(getOptOrDefault(
ClKeepGoing, Kernel || R)),
747 MemorySanitizer Msan(*
F.getParent(),
Options);
766 OS, MapClassName2PassName);
773 OS <<
"eager-checks;";
774 OS <<
"track-origins=" <<
Options.TrackOrigins;
790template <
typename... ArgsTy>
797 std::forward<ArgsTy>(Args)...);
800 return M.getOrInsertFunction(
Name, MsanMetadata,
801 std::forward<ArgsTy>(Args)...);
810 RetvalOriginTLS =
nullptr;
812 ParamOriginTLS =
nullptr;
814 VAArgOriginTLS =
nullptr;
815 VAArgOverflowSizeTLS =
nullptr;
817 WarningFn =
M.getOrInsertFunction(
"__msan_warning",
819 IRB.getVoidTy(), IRB.getInt32Ty());
830 MsanGetContextStateFn =
831 M.getOrInsertFunction(
"__msan_get_context_state", PtrTy);
835 for (
int ind = 0, size = 1; ind < 4; ind++,
size <<= 1) {
836 std::string name_load =
837 "__msan_metadata_ptr_for_load_" + std::to_string(size);
838 std::string name_store =
839 "__msan_metadata_ptr_for_store_" + std::to_string(size);
840 MsanMetadataPtrForLoad_1_8[ind] =
841 getOrInsertMsanMetadataFunction(M, name_load, PtrTy);
842 MsanMetadataPtrForStore_1_8[ind] =
843 getOrInsertMsanMetadataFunction(M, name_store, PtrTy);
846 MsanMetadataPtrForLoadN = getOrInsertMsanMetadataFunction(
847 M,
"__msan_metadata_ptr_for_load_n", PtrTy, IRB.getInt64Ty());
848 MsanMetadataPtrForStoreN = getOrInsertMsanMetadataFunction(
849 M,
"__msan_metadata_ptr_for_store_n", PtrTy, IRB.getInt64Ty());
852 MsanPoisonAllocaFn =
M.getOrInsertFunction(
853 "__msan_poison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
854 MsanUnpoisonAllocaFn =
M.getOrInsertFunction(
855 "__msan_unpoison_alloca", IRB.getVoidTy(), PtrTy, IntptrTy);
859 return M.getOrInsertGlobal(
Name, Ty, [&] {
861 nullptr,
Name,
nullptr,
867void MemorySanitizer::createUserspaceApi(
Module &M,
875 StringRef WarningFnName = Recover ?
"__msan_warning_with_origin"
876 :
"__msan_warning_with_origin_noreturn";
877 WarningFn =
M.getOrInsertFunction(WarningFnName,
879 IRB.getVoidTy(), IRB.getInt32Ty());
882 Recover ?
"__msan_warning" :
"__msan_warning_noreturn";
883 WarningFn =
M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());
909 VAArgOverflowSizeTLS =
914 unsigned AccessSize = 1 << AccessSizeIndex;
915 std::string FunctionName =
"__msan_maybe_warning_" + itostr(AccessSize);
916 MaybeWarningFn[AccessSizeIndex] =
M.getOrInsertFunction(
918 IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), IRB.getInt32Ty());
920 FunctionName =
"__msan_maybe_store_origin_" + itostr(AccessSize);
921 MaybeStoreOriginFn[AccessSizeIndex] =
M.getOrInsertFunction(
923 IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8), PtrTy,
927 MsanSetAllocaOriginWithDescriptionFn =
928 M.getOrInsertFunction(
"__msan_set_alloca_origin_with_descr",
929 IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy, PtrTy);
930 MsanSetAllocaOriginNoDescriptionFn =
931 M.getOrInsertFunction(
"__msan_set_alloca_origin_no_descr",
932 IRB.getVoidTy(), PtrTy, IntptrTy, PtrTy);
933 MsanPoisonStackFn =
M.getOrInsertFunction(
"__msan_poison_stack",
934 IRB.getVoidTy(), PtrTy, IntptrTy);
938void MemorySanitizer::initializeCallbacks(
Module &M,
941 if (CallbacksInitialized)
947 MsanChainOriginFn =
M.getOrInsertFunction(
948 "__msan_chain_origin",
951 MsanSetOriginFn =
M.getOrInsertFunction(
953 IRB.getVoidTy(), PtrTy, IntptrTy, IRB.getInt32Ty());
955 M.getOrInsertFunction(
"__msan_memmove", PtrTy, PtrTy, PtrTy, IntptrTy);
957 M.getOrInsertFunction(
"__msan_memcpy", PtrTy, PtrTy, PtrTy, IntptrTy);
958 MemsetFn =
M.getOrInsertFunction(
"__msan_memset",
960 PtrTy, PtrTy, IRB.getInt32Ty(), IntptrTy);
962 MsanInstrumentAsmStoreFn =
M.getOrInsertFunction(
963 "__msan_instrument_asm_store", IRB.getVoidTy(), PtrTy, IntptrTy);
966 createKernelApi(M, TLI);
968 createUserspaceApi(M, TLI);
970 CallbacksInitialized =
true;
976 isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
994void MemorySanitizer::initializeModule(
Module &M) {
995 auto &
DL =
M.getDataLayout();
997 TargetTriple =
Triple(
M.getTargetTriple());
999 bool ShadowPassed =
ClShadowBase.getNumOccurrences() > 0;
1000 bool OriginPassed =
ClOriginBase.getNumOccurrences() > 0;
1002 if (ShadowPassed || OriginPassed) {
1007 MapParams = &CustomMapParams;
1009 switch (TargetTriple.getOS()) {
1011 switch (TargetTriple.getArch()) {
1026 switch (TargetTriple.getArch()) {
1035 switch (TargetTriple.getArch()) {
1069 C = &(
M.getContext());
1071 IntptrTy = IRB.getIntPtrTy(
DL);
1072 OriginTy = IRB.getInt32Ty();
1073 PtrTy = IRB.getPtrTy();
1078 if (!CompileKernel) {
1080 M.getOrInsertGlobal(
"__msan_track_origins", IRB.getInt32Ty(), [&] {
1081 return new GlobalVariable(
1082 M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
1083 IRB.getInt32(TrackOrigins),
"__msan_track_origins");
1087 M.getOrInsertGlobal(
"__msan_keep_going", IRB.getInt32Ty(), [&] {
1088 return new GlobalVariable(M, IRB.getInt32Ty(), true,
1089 GlobalValue::WeakODRLinkage,
1090 IRB.getInt32(Recover),
"__msan_keep_going");
1105struct VarArgHelper {
1106 virtual ~VarArgHelper() =
default;
1121 virtual void finalizeInstrumentation() = 0;
1124struct MemorySanitizerVisitor;
1129 MemorySanitizerVisitor &Visitor);
1136 if (TypeSizeFixed <= 8)
1145class NextNodeIRBuilder :
public IRBuilder<> {
1158struct MemorySanitizerVisitor :
public InstVisitor<MemorySanitizerVisitor> {
1160 MemorySanitizer &MS;
1163 std::unique_ptr<VarArgHelper> VAHelper;
1171 bool PropagateShadow;
1175 struct ShadowOriginAndInsertPoint {
1181 : Shadow(S), Origin(
O), OrigIns(
I) {}
1189 int64_t SplittableBlocksCount = 0;
1191 MemorySanitizerVisitor(
Function &
F, MemorySanitizer &MS,
1194 bool SanitizeFunction =
1196 InsertChecks = SanitizeFunction;
1197 PropagateShadow = SanitizeFunction;
1207 MS.initializeCallbacks(*
F.getParent(), TLI);
1209 IRBuilder<>(&
F.getEntryBlock(),
F.getEntryBlock().getFirstNonPHIIt())
1212 if (MS.CompileKernel) {
1214 insertKmsanPrologue(IRB);
1218 <<
"MemorySanitizer is not inserting checks into '"
1219 <<
F.getName() <<
"'\n");
1222 bool instrumentWithCalls(
Value *V) {
1224 if (isa<Constant>(V))
1227 ++SplittableBlocksCount;
1233 return I.getParent() == FnPrologueEnd->
getParent() &&
1234 (&
I == FnPrologueEnd ||
I.comesBefore(FnPrologueEnd));
1242 if (MS.TrackOrigins <= 1)
1244 return IRB.
CreateCall(MS.MsanChainOriginFn, V);
1249 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
1261 const Align IntptrAlignment =
DL.getABITypeAlign(MS.IntptrTy);
1262 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
1274 auto [InsertPt,
Index] =
1286 Align CurrentAlignment = Alignment;
1287 if (Alignment >= IntptrAlignment && IntptrSize >
kOriginSize) {
1288 Value *IntptrOrigin = originToIntptr(IRB, Origin);
1290 for (
unsigned i = 0; i <
Size / IntptrSize; ++i) {
1295 CurrentAlignment = IntptrAlignment;
1313 Value *ConvertedShadow = convertShadowToScalar(Shadow, IRB);
1314 if (
auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1322 paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
1331 if (instrumentWithCalls(ConvertedShadow) &&
1334 Value *ConvertedShadow2 =
1340 Value *
Cmp = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1344 paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
1349 void materializeStores() {
1352 Value *Val =
SI->getValueOperand();
1354 Value *Shadow =
SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
1355 Value *ShadowPtr, *OriginPtr;
1357 const Align Alignment =
SI->getAlign();
1359 std::tie(ShadowPtr, OriginPtr) =
1360 getShadowOriginPtr(
Addr, IRB, ShadowTy, Alignment,
true);
1369 if (MS.TrackOrigins && !
SI->isAtomic())
1370 storeOrigin(IRB,
Addr, Shadow, getOrigin(Val), OriginPtr,
1377 if (MS.TrackOrigins < 2)
1380 if (LazyWarningDebugLocationCount.
empty())
1381 for (
const auto &
I : InstrumentationList)
1382 ++LazyWarningDebugLocationCount[
I.OrigIns->getDebugLoc()];
1396 if (
Instruction *OI = dyn_cast_or_null<Instruction>(Origin)) {
1398 auto NewDebugLoc = OI->getDebugLoc();
1405 IRBOrigin.SetCurrentDebugLocation(NewDebugLoc);
1406 Origin = updateOrigin(Origin, IRBOrigin);
1411 if (MS.CompileKernel || MS.TrackOrigins)
1425 if (instrumentWithCalls(ConvertedShadow) &&
1429 ConvertedShadow = convertShadowToScalar(ConvertedShadow, IRB);
1430 Value *ConvertedShadow2 =
1433 Fn, {ConvertedShadow2,
1434 MS.TrackOrigins && Origin ? Origin : (
Value *)IRB.
getInt32(0)});
1438 Value *
Cmp = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1441 !MS.Recover, MS.ColdCallWeights);
1444 insertWarningFn(IRB, Origin);
1449 void materializeInstructionChecks(
1454 bool Combine = !MS.TrackOrigins;
1456 Value *Shadow =
nullptr;
1457 for (
const auto &ShadowData : InstructionChecks) {
1461 Value *ConvertedShadow = ShadowData.Shadow;
1463 if (
auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
1470 insertWarningFn(IRB, ShadowData.Origin);
1480 materializeOneCheck(IRB, ConvertedShadow, ShadowData.Origin);
1485 Shadow = ConvertedShadow;
1489 Shadow = convertToBool(Shadow, IRB,
"_mscmp");
1490 ConvertedShadow = convertToBool(ConvertedShadow, IRB,
"_mscmp");
1491 Shadow = IRB.
CreateOr(Shadow, ConvertedShadow,
"_msor");
1497 materializeOneCheck(IRB, Shadow,
nullptr);
1501 void materializeChecks() {
1507 for (
auto I = InstrumentationList.begin();
1508 I != InstrumentationList.end();) {
1509 auto OrigIns =
I->OrigIns;
1513 auto J = std::find_if(
I + 1, InstrumentationList.end(),
1514 [OrigIns](
const ShadowOriginAndInsertPoint &R) {
1515 return OrigIns != R.OrigIns;
1529 MS.ParamTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1530 {Zero, IRB.getInt32(0)},
"param_shadow");
1531 MS.RetvalTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1532 {Zero, IRB.getInt32(1)},
"retval_shadow");
1533 MS.VAArgTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1534 {Zero, IRB.getInt32(2)},
"va_arg_shadow");
1535 MS.VAArgOriginTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1536 {Zero, IRB.getInt32(3)},
"va_arg_origin");
1537 MS.VAArgOverflowSizeTLS =
1538 IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1539 {Zero, IRB.getInt32(4)},
"va_arg_overflow_size");
1540 MS.ParamOriginTLS = IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1541 {Zero, IRB.getInt32(5)},
"param_origin");
1542 MS.RetvalOriginTLS =
1543 IRB.
CreateGEP(MS.MsanContextStateTy, ContextState,
1544 {Zero, IRB.getInt32(6)},
"retval_origin");
1546 MS.MsanMetadataAlloca = IRB.
CreateAlloca(MS.MsanMetadata, 0u);
1563 for (
PHINode *PN : ShadowPHINodes) {
1564 PHINode *PNS = cast<PHINode>(getShadow(PN));
1565 PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
1566 size_t NumValues = PN->getNumIncomingValues();
1567 for (
size_t v = 0;
v < NumValues;
v++) {
1568 PNS->
addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
1570 PNO->
addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
1574 VAHelper->finalizeInstrumentation();
1578 if (InstrumentLifetimeStart) {
1579 for (
auto Item : LifetimeStartList) {
1580 instrumentAlloca(*Item.second, Item.first);
1581 AllocaSet.
remove(Item.second);
1587 instrumentAlloca(*AI);
1590 materializeChecks();
1594 materializeStores();
1600 Type *getShadowTy(
Value *V) {
return getShadowTy(
V->getType()); }
1612 if (
VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
1613 uint32_t EltSize =
DL.getTypeSizeInBits(VT->getElementType());
1615 VT->getElementCount());
1617 if (
ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
1618 return ArrayType::get(getShadowTy(AT->getElementType()),
1619 AT->getNumElements());
1621 if (
StructType *ST = dyn_cast<StructType>(OrigTy)) {
1623 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
1624 Elements.push_back(getShadowTy(
ST->getElementType(i)));
1626 LLVM_DEBUG(
dbgs() <<
"getShadowTy: " << *ST <<
" ===> " << *Res <<
"\n");
1642 Value *ShadowBool = convertToBool(ShadowItem, IRB);
1644 if (Aggregator != FalseVal)
1645 Aggregator = IRB.
CreateOr(Aggregator, ShadowBool);
1647 Aggregator = ShadowBool;
1656 if (!
Array->getNumElements())
1660 Value *Aggregator = convertShadowToScalar(FirstItem, IRB);
1664 Value *ShadowInner = convertShadowToScalar(ShadowItem, IRB);
1665 Aggregator = IRB.
CreateOr(Aggregator, ShadowInner);
1675 return collapseStructShadow(
Struct, V, IRB);
1676 if (
ArrayType *Array = dyn_cast<ArrayType>(
V->getType()))
1677 return collapseArrayShadow(Array, V, IRB);
1678 if (isa<VectorType>(
V->getType())) {
1679 if (isa<ScalableVectorType>(
V->getType()))
1682 V->getType()->getPrimitiveSizeInBits().getFixedValue();
1690 Type *VTy =
V->getType();
1692 return convertToBool(convertShadowToScalar(V, IRB), IRB,
name);
1699 Type *ptrToIntPtrType(
Type *PtrTy)
const {
1700 if (
VectorType *VectTy = dyn_cast<VectorType>(PtrTy)) {
1701 return VectorType::get(ptrToIntPtrType(VectTy->getElementType()),
1702 VectTy->getElementCount());
1708 Type *getPtrToShadowPtrType(
Type *IntPtrTy,
Type *ShadowTy)
const {
1709 if (
VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
1710 return VectorType::get(
1711 getPtrToShadowPtrType(VectTy->getElementType(), ShadowTy),
1712 VectTy->getElementCount());
1714 assert(IntPtrTy == MS.IntptrTy);
1719 if (
VectorType *VectTy = dyn_cast<VectorType>(IntPtrTy)) {
1721 VectTy->getElementCount(),
1722 constToIntPtr(VectTy->getElementType(),
C));
1724 assert(IntPtrTy == MS.IntptrTy);
1725 return ConstantInt::get(MS.IntptrTy,
C);
1738 Type *IntptrTy = ptrToIntPtrType(
Addr->getType());
1741 if (
uint64_t AndMask = MS.MapParams->AndMask)
1742 OffsetLong = IRB.
CreateAnd(OffsetLong, constToIntPtr(IntptrTy, ~AndMask));
1744 if (
uint64_t XorMask = MS.MapParams->XorMask)
1745 OffsetLong = IRB.
CreateXor(OffsetLong, constToIntPtr(IntptrTy, XorMask));
1757 std::pair<Value *, Value *>
1764 assert(VectTy->getElementType()->isPointerTy());
1766 Type *IntptrTy = ptrToIntPtrType(
Addr->getType());
1767 Value *ShadowOffset = getShadowPtrOffset(
Addr, IRB);
1768 Value *ShadowLong = ShadowOffset;
1769 if (
uint64_t ShadowBase = MS.MapParams->ShadowBase) {
1771 IRB.
CreateAdd(ShadowLong, constToIntPtr(IntptrTy, ShadowBase));
1774 ShadowLong, getPtrToShadowPtrType(IntptrTy, ShadowTy));
1776 Value *OriginPtr =
nullptr;
1777 if (MS.TrackOrigins) {
1778 Value *OriginLong = ShadowOffset;
1779 uint64_t OriginBase = MS.MapParams->OriginBase;
1780 if (OriginBase != 0)
1782 IRB.
CreateAdd(OriginLong, constToIntPtr(IntptrTy, OriginBase));
1785 OriginLong = IRB.
CreateAnd(OriginLong, constToIntPtr(IntptrTy, ~Mask));
1788 OriginLong, getPtrToShadowPtrType(IntptrTy, MS.OriginTy));
1790 return std::make_pair(ShadowPtr, OriginPtr);
1793 template <
typename... ArgsTy>
1798 {MS.MsanMetadataAlloca, std::forward<ArgsTy>(Args)...});
1799 return IRB.
CreateLoad(MS.MsanMetadata, MS.MsanMetadataAlloca);
1802 return IRB.
CreateCall(Callee, {std::forward<ArgsTy>(Args)...});
1805 std::pair<Value *, Value *> getShadowOriginPtrKernelNoVec(
Value *
Addr,
1809 Value *ShadowOriginPtrs;
1816 ShadowOriginPtrs = createMetadataCall(IRB, Getter, AddrCast);
1818 Value *SizeVal = ConstantInt::get(MS.IntptrTy,
Size);
1819 ShadowOriginPtrs = createMetadataCall(
1821 isStore ? MS.MsanMetadataPtrForStoreN : MS.MsanMetadataPtrForLoadN,
1828 return std::make_pair(ShadowPtr, OriginPtr);
1834 std::pair<Value *, Value *> getShadowOriginPtrKernel(
Value *
Addr,
1841 return getShadowOriginPtrKernelNoVec(
Addr, IRB, ShadowTy,
isStore);
1845 unsigned NumElements = cast<FixedVectorType>(VectTy)->getNumElements();
1846 Value *ShadowPtrs = ConstantInt::getNullValue(
1848 Value *OriginPtrs =
nullptr;
1849 if (MS.TrackOrigins)
1850 OriginPtrs = ConstantInt::getNullValue(
1852 for (
unsigned i = 0; i < NumElements; ++i) {
1855 auto [ShadowPtr, OriginPtr] =
1856 getShadowOriginPtrKernelNoVec(OneAddr, IRB, ShadowTy,
isStore);
1859 ShadowPtrs, ShadowPtr, ConstantInt::get(IRB.
getInt32Ty(), i));
1860 if (MS.TrackOrigins)
1862 OriginPtrs, OriginPtr, ConstantInt::get(IRB.
getInt32Ty(), i));
1864 return {ShadowPtrs, OriginPtrs};
1871 if (MS.CompileKernel)
1872 return getShadowOriginPtrKernel(
Addr, IRB, ShadowTy,
isStore);
1873 return getShadowOriginPtrUserspace(
Addr, IRB, ShadowTy, Alignment);
1888 if (!MS.TrackOrigins)
1902 Value *getOriginPtrForRetval() {
1904 return MS.RetvalOriginTLS;
1909 assert(!ShadowMap.
count(V) &&
"Values may only have one shadow");
1910 ShadowMap[
V] = PropagateShadow ? SV : getCleanShadow(V);
1915 if (!MS.TrackOrigins)
1917 assert(!OriginMap.
count(V) &&
"Values may only have one origin");
1918 LLVM_DEBUG(
dbgs() <<
"ORIGIN: " << *V <<
" ==> " << *Origin <<
"\n");
1919 OriginMap[
V] = Origin;
1923 Type *ShadowTy = getShadowTy(OrigTy);
1933 Constant *getCleanShadow(
Value *V) {
return getCleanShadow(
V->getType()); }
1938 if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1940 if (
ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1942 getPoisonedShadow(AT->getElementType()));
1945 if (
StructType *ST = dyn_cast<StructType>(ShadowTy)) {
1947 for (
unsigned i = 0, n =
ST->getNumElements(); i < n; i++)
1948 Vals.
push_back(getPoisonedShadow(
ST->getElementType(i)));
1956 Type *ShadowTy = getShadowTy(V);
1959 return getPoisonedShadow(ShadowTy);
1971 if (!PropagateShadow ||
I->getMetadata(LLVMContext::MD_nosanitize))
1972 return getCleanShadow(V);
1974 Value *Shadow = ShadowMap[
V];
1976 LLVM_DEBUG(
dbgs() <<
"No shadow: " << *V <<
"\n" << *(
I->getParent()));
1978 assert(Shadow &&
"No shadow for a value");
1982 if (
UndefValue *U = dyn_cast<UndefValue>(V)) {
1983 Value *
AllOnes = (PropagateShadow && PoisonUndef) ? getPoisonedShadow(V)
1984 : getCleanShadow(V);
1989 if (
Argument *
A = dyn_cast<Argument>(V)) {
1991 Value *&ShadowPtr = ShadowMap[
V];
1996 unsigned ArgOffset = 0;
1998 for (
auto &FArg :
F->args()) {
1999 if (!FArg.getType()->isSized() || FArg.getType()->isScalableTy()) {
2001 ?
"vscale not fully supported\n"
2002 :
"Arg is not sized\n"));
2004 ShadowPtr = getCleanShadow(V);
2005 setOrigin(
A, getCleanOrigin());
2011 unsigned Size = FArg.hasByValAttr()
2012 ?
DL.getTypeAllocSize(FArg.getParamByValType())
2013 :
DL.getTypeAllocSize(FArg.getType());
2017 if (FArg.hasByValAttr()) {
2021 const Align ArgAlign =
DL.getValueOrABITypeAlignment(
2022 FArg.getParamAlign(), FArg.getParamByValType());
2023 Value *CpShadowPtr, *CpOriginPtr;
2024 std::tie(CpShadowPtr, CpOriginPtr) =
2025 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
2027 if (!PropagateShadow || Overflow) {
2029 EntryIRB.CreateMemSet(
2033 Value *
Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2035 Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign,
Base,
2040 if (MS.TrackOrigins) {
2041 Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2045 EntryIRB.CreateMemCpy(
2054 if (!PropagateShadow || Overflow || FArg.hasByValAttr() ||
2055 (MS.EagerChecks && FArg.hasAttribute(Attribute::NoUndef))) {
2056 ShadowPtr = getCleanShadow(V);
2057 setOrigin(
A, getCleanOrigin());
2060 Value *
Base = getShadowPtrForArgument(EntryIRB, ArgOffset);
2061 ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg),
Base,
2063 if (MS.TrackOrigins) {
2064 Value *OriginPtr = getOriginPtrForArgument(EntryIRB, ArgOffset);
2065 setOrigin(
A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
2069 <<
" ARG: " << FArg <<
" ==> " << *ShadowPtr <<
"\n");
2075 assert(ShadowPtr &&
"Could not find shadow for an argument");
2079 return getCleanShadow(V);
2084 return getShadow(
I->getOperand(i));
2089 if (!MS.TrackOrigins)
2091 if (!PropagateShadow || isa<Constant>(V) || isa<InlineAsm>(V))
2092 return getCleanOrigin();
2093 assert((isa<Instruction>(V) || isa<Argument>(V)) &&
2094 "Unexpected value type in getOrigin()");
2096 if (
I->getMetadata(LLVMContext::MD_nosanitize))
2097 return getCleanOrigin();
2099 Value *Origin = OriginMap[
V];
2100 assert(Origin &&
"Missing origin");
2106 return getOrigin(
I->getOperand(i));
2119 LLVM_DEBUG(
dbgs() <<
"Skipping check of " << *Shadow <<
" before "
2120 << *OrigIns <<
"\n");
2125 assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy) ||
2126 isa<StructType>(ShadowTy) || isa<ArrayType>(ShadowTy)) &&
2127 "Can only insert checks for integer, vector, and aggregate shadow "
2130 InstrumentationList.push_back(
2131 ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
2140 Value *Shadow, *Origin;
2142 Shadow = getShadow(Val);
2145 Origin = getOrigin(Val);
2147 Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
2150 Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
2152 insertShadowCheck(Shadow, Origin, OrigIns);
2157 case AtomicOrdering::NotAtomic:
2158 return AtomicOrdering::NotAtomic;
2159 case AtomicOrdering::Unordered:
2160 case AtomicOrdering::Monotonic:
2161 case AtomicOrdering::Release:
2162 return AtomicOrdering::Release;
2163 case AtomicOrdering::Acquire:
2164 case AtomicOrdering::AcquireRelease:
2165 return AtomicOrdering::AcquireRelease;
2166 case AtomicOrdering::SequentiallyConsistent:
2167 return AtomicOrdering::SequentiallyConsistent;
2173 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2174 uint32_t OrderingTable[NumOrderings] = {};
2176 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2177 OrderingTable[(
int)AtomicOrderingCABI::release] =
2178 (int)AtomicOrderingCABI::release;
2179 OrderingTable[(int)AtomicOrderingCABI::consume] =
2180 OrderingTable[(
int)AtomicOrderingCABI::acquire] =
2181 OrderingTable[(int)AtomicOrderingCABI::acq_rel] =
2182 (
int)AtomicOrderingCABI::acq_rel;
2183 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2184 (
int)AtomicOrderingCABI::seq_cst;
2191 case AtomicOrdering::NotAtomic:
2192 return AtomicOrdering::NotAtomic;
2193 case AtomicOrdering::Unordered:
2194 case AtomicOrdering::Monotonic:
2195 case AtomicOrdering::Acquire:
2196 return AtomicOrdering::Acquire;
2197 case AtomicOrdering::Release:
2198 case AtomicOrdering::AcquireRelease:
2199 return AtomicOrdering::AcquireRelease;
2200 case AtomicOrdering::SequentiallyConsistent:
2201 return AtomicOrdering::SequentiallyConsistent;
2207 constexpr int NumOrderings = (int)AtomicOrderingCABI::seq_cst + 1;
2208 uint32_t OrderingTable[NumOrderings] = {};
2210 OrderingTable[(int)AtomicOrderingCABI::relaxed] =
2211 OrderingTable[(
int)AtomicOrderingCABI::acquire] =
2212 OrderingTable[(int)AtomicOrderingCABI::consume] =
2213 (
int)AtomicOrderingCABI::acquire;
2214 OrderingTable[(int)AtomicOrderingCABI::release] =
2215 OrderingTable[(
int)AtomicOrderingCABI::acq_rel] =
2216 (int)AtomicOrderingCABI::acq_rel;
2217 OrderingTable[(int)AtomicOrderingCABI::seq_cst] =
2218 (
int)AtomicOrderingCABI::seq_cst;
2226 if (
I.getMetadata(LLVMContext::MD_nosanitize))
2229 if (isInPrologue(
I))
2234 setShadow(&
I, getCleanShadow(&
I));
2235 setOrigin(&
I, getCleanOrigin());
2247 assert(
I.getType()->isSized() &&
"Load type must have size");
2248 assert(!
I.getMetadata(LLVMContext::MD_nosanitize));
2249 NextNodeIRBuilder IRB(&
I);
2250 Type *ShadowTy = getShadowTy(&
I);
2252 Value *ShadowPtr =
nullptr, *OriginPtr =
nullptr;
2253 const Align Alignment =
I.getAlign();
2254 if (PropagateShadow) {
2255 std::tie(ShadowPtr, OriginPtr) =
2256 getShadowOriginPtr(
Addr, IRB, ShadowTy, Alignment,
false);
2260 setShadow(&
I, getCleanShadow(&
I));
2264 insertShadowCheck(
I.getPointerOperand(), &
I);
2269 if (MS.TrackOrigins) {
2270 if (PropagateShadow) {
2275 setOrigin(&
I, getCleanOrigin());
2285 StoreList.push_back(&
I);
2287 insertShadowCheck(
I.getPointerOperand(), &
I);
2291 assert(isa<AtomicRMWInst>(
I) || isa<AtomicCmpXchgInst>(
I));
2295 Value *Val =
I.getOperand(1);
2296 Value *ShadowPtr = getShadowOriginPtr(
Addr, IRB, getShadowTy(Val),
Align(1),
2301 insertShadowCheck(
Addr, &
I);
2306 if (isa<AtomicCmpXchgInst>(
I))
2307 insertShadowCheck(Val, &
I);
2311 setShadow(&
I, getCleanShadow(&
I));
2312 setOrigin(&
I, getCleanOrigin());
2327 insertShadowCheck(
I.getOperand(1), &
I);
2331 setOrigin(&
I, getOrigin(&
I, 0));
2335 insertShadowCheck(
I.getOperand(2), &
I);
2337 auto *Shadow0 = getShadow(&
I, 0);
2338 auto *Shadow1 = getShadow(&
I, 1);
2341 setOriginForNaryOp(
I);
2346 auto *Shadow0 = getShadow(&
I, 0);
2347 auto *Shadow1 = getShadow(&
I, 1);
2350 setOriginForNaryOp(
I);
2356 setShadow(&
I, IRB.
CreateSExt(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2357 setOrigin(&
I, getOrigin(&
I, 0));
2362 setShadow(&
I, IRB.
CreateZExt(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2363 setOrigin(&
I, getOrigin(&
I, 0));
2368 setShadow(&
I, IRB.
CreateTrunc(getShadow(&
I, 0),
I.getType(),
"_msprop"));
2369 setOrigin(&
I, getOrigin(&
I, 0));
2376 if (
auto *CI = dyn_cast<CallInst>(
I.getOperand(0)))
2377 if (CI->isMustTailCall())
2381 setOrigin(&
I, getOrigin(&
I, 0));
2387 "_msprop_ptrtoint"));
2388 setOrigin(&
I, getOrigin(&
I, 0));
2394 "_msprop_inttoptr"));
2395 setOrigin(&
I, getOrigin(&
I, 0));
2398 void visitFPToSIInst(
CastInst &
I) { handleShadowOr(
I); }
2399 void visitFPToUIInst(
CastInst &
I) { handleShadowOr(
I); }
2400 void visitSIToFPInst(
CastInst &
I) { handleShadowOr(
I); }
2401 void visitUIToFPInst(
CastInst &
I) { handleShadowOr(
I); }
2402 void visitFPExtInst(
CastInst &
I) { handleShadowOr(
I); }
2403 void visitFPTruncInst(
CastInst &
I) { handleShadowOr(
I); }
2418 Value *S2 = getShadow(&
I, 1);
2419 Value *V1 =
I.getOperand(0);
2428 setShadow(&
I, IRB.
CreateOr({S1S2, V1S2, S1V2}));
2429 setOriginForNaryOp(
I);
2440 Value *S2 = getShadow(&
I, 1);
2450 setShadow(&
I, IRB.
CreateOr({S1S2, V1S2, S1V2}));
2451 setOriginForNaryOp(
I);
2469 template <
bool CombineShadow>
class Combiner {
2470 Value *Shadow =
nullptr;
2471 Value *Origin =
nullptr;
2473 MemorySanitizerVisitor *MSV;
2477 : IRB(IRB), MSV(MSV) {}
2481 if (CombineShadow) {
2486 OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
2487 Shadow = IRB.
CreateOr(Shadow, OpShadow,
"_msprop");
2491 if (MSV->MS.TrackOrigins) {
2496 Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
2498 if (!ConstOrigin || !ConstOrigin->
isNullValue()) {
2499 Value *
Cond = MSV->convertToBool(OpShadow, IRB);
2509 Value *OpShadow = MSV->getShadow(V);
2510 Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) :
nullptr;
2511 return Add(OpShadow, OpOrigin);
2517 if (CombineShadow) {
2519 Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(
I));
2520 MSV->setShadow(
I, Shadow);
2522 if (MSV->MS.TrackOrigins) {
2524 MSV->setOrigin(
I, Origin);
2531 if (MSV->MS.TrackOrigins) {
2543 if (!MS.TrackOrigins)
2546 OriginCombiner
OC(
this, IRB);
2547 for (
Use &
Op :
I.operands())
2552 size_t VectorOrPrimitiveTypeSizeInBits(
Type *Ty) {
2554 "Vector of pointers is not a valid shadow type");
2555 return Ty->
isVectorTy() ? cast<FixedVectorType>(Ty)->getNumElements() *
2564 Type *srcTy =
V->getType();
2567 size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
2568 size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
2569 if (srcSizeInBits > 1 && dstSizeInBits == 1)
2575 cast<VectorType>(dstTy)->getElementCount() ==
2576 cast<VectorType>(srcTy)->getElementCount())
2587 Type *ShadowTy = getShadowTy(V);
2588 if (
V->getType() == ShadowTy)
2590 if (
V->getType()->isPtrOrPtrVectorTy())
2599 ShadowAndOriginCombiner
SC(
this, IRB);
2600 for (
Use &
Op :
I.operands())
2620 if (
auto *VTy = dyn_cast<VectorType>(Ty)) {
2621 unsigned NumElements = cast<FixedVectorType>(VTy)->getNumElements();
2622 Type *EltTy = VTy->getElementType();
2624 for (
unsigned Idx = 0;
Idx < NumElements; ++
Idx) {
2627 const APInt &
V = Elt->getValue();
2629 Elements.push_back(ConstantInt::get(EltTy, V2));
2631 Elements.push_back(ConstantInt::get(EltTy, 1));
2636 if (
ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
2637 const APInt &
V = Elt->getValue();
2639 ShadowMul = ConstantInt::get(Ty, V2);
2641 ShadowMul = ConstantInt::get(Ty, 1);
2647 IRB.
CreateMul(getShadow(OtherArg), ShadowMul,
"msprop_mul_cst"));
2648 setOrigin(&
I, getOrigin(OtherArg));
2652 Constant *constOp0 = dyn_cast<Constant>(
I.getOperand(0));
2653 Constant *constOp1 = dyn_cast<Constant>(
I.getOperand(1));
2654 if (constOp0 && !constOp1)
2655 handleMulByConstant(
I, constOp0,
I.getOperand(1));
2656 else if (constOp1 && !constOp0)
2657 handleMulByConstant(
I, constOp1,
I.getOperand(0));
2672 insertShadowCheck(
I.getOperand(1), &
I);
2673 setShadow(&
I, getShadow(&
I, 0));
2674 setOrigin(&
I, getOrigin(&
I, 0));
2691 void handleEqualityComparison(
ICmpInst &
I) {
2695 Value *Sa = getShadow(
A);
2696 Value *Sb = getShadow(
B);
2722 setOriginForNaryOp(
I);
2730 void handleRelationalComparisonExact(
ICmpInst &
I) {
2734 Value *Sa = getShadow(
A);
2735 Value *Sb = getShadow(
B);
2746 bool IsSigned =
I.isSigned();
2748 auto GetMinMaxUnsigned = [&](
Value *
V,
Value *S) {
2758 V = IRB.
CreateXor(V, ConstantInt::get(
V->getType(), MinVal));
2763 return std::make_pair(Min, Max);
2766 auto [Amin, Amax] = GetMinMaxUnsigned(
A, Sa);
2767 auto [Bmin, Bmax] = GetMinMaxUnsigned(
B, Sb);
2773 setOriginForNaryOp(
I);
2780 void handleSignedRelationalComparison(
ICmpInst &
I) {
2784 if ((constOp = dyn_cast<Constant>(
I.getOperand(1)))) {
2785 op =
I.getOperand(0);
2786 pre =
I.getPredicate();
2787 }
else if ((constOp = dyn_cast<Constant>(
I.getOperand(0)))) {
2788 op =
I.getOperand(1);
2789 pre =
I.getSwappedPredicate();
2802 setShadow(&
I, Shadow);
2803 setOrigin(&
I, getOrigin(
op));
2814 if (
I.isEquality()) {
2815 handleEqualityComparison(
I);
2821 handleRelationalComparisonExact(
I);
2825 handleSignedRelationalComparison(
I);
2830 if ((isa<Constant>(
I.getOperand(0)) || isa<Constant>(
I.getOperand(1)))) {
2831 handleRelationalComparisonExact(
I);
2838 void visitFCmpInst(
FCmpInst &
I) { handleShadowOr(
I); }
2845 Value *S2 = getShadow(&
I, 1);
2850 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
2851 setOriginForNaryOp(
I);
2862 Value *S0 = getShadow(&
I, 0);
2864 Value *S2 = getShadow(&
I, 2);
2870 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
2871 setOriginForNaryOp(
I);
2885 getShadow(
I.getArgOperand(1));
2888 {I.getArgOperand(0), I.getArgOperand(1),
2889 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2890 I.eraseFromParent();
2908 getShadow(
I.getArgOperand(1));
2911 {I.getArgOperand(0), I.getArgOperand(1),
2912 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2913 I.eraseFromParent();
2921 {I.getArgOperand(0),
2922 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
2923 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2924 I.eraseFromParent();
2927 void visitVAStartInst(
VAStartInst &
I) { VAHelper->visitVAStartInst(
I); }
2929 void visitVACopyInst(
VACopyInst &
I) { VAHelper->visitVACopyInst(
I); }
2938 Value *Shadow = getShadow(&
I, 1);
2939 Value *ShadowPtr, *OriginPtr;
2943 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2948 insertShadowCheck(
Addr, &
I);
2951 if (MS.TrackOrigins)
2964 Type *ShadowTy = getShadowTy(&
I);
2965 Value *ShadowPtr =
nullptr, *OriginPtr =
nullptr;
2966 if (PropagateShadow) {
2970 std::tie(ShadowPtr, OriginPtr) =
2971 getShadowOriginPtr(
Addr, IRB, ShadowTy, Alignment,
false);
2975 setShadow(&
I, getCleanShadow(&
I));
2979 insertShadowCheck(
Addr, &
I);
2981 if (MS.TrackOrigins) {
2982 if (PropagateShadow)
2983 setOrigin(&
I, IRB.
CreateLoad(MS.OriginTy, OriginPtr));
2985 setOrigin(&
I, getCleanOrigin());
2998 if (!(
RetTy->isIntOrIntVectorTy() ||
RetTy->isFPOrFPVectorTy()))
3001 unsigned NumArgOperands =
I.arg_size();
3002 for (
unsigned i = 0; i < NumArgOperands; ++i) {
3003 Type *Ty =
I.getArgOperand(i)->getType();
3009 ShadowAndOriginCombiner
SC(
this, IRB);
3010 for (
unsigned i = 0; i < NumArgOperands; ++i)
3011 SC.Add(
I.getArgOperand(i));
3028 unsigned NumArgOperands =
I.arg_size();
3029 if (NumArgOperands == 0)
3032 if (NumArgOperands == 2 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
3033 I.getArgOperand(1)->getType()->isVectorTy() &&
3034 I.getType()->isVoidTy() && !
I.onlyReadsMemory()) {
3036 return handleVectorStoreIntrinsic(
I);
3039 if (NumArgOperands == 1 &&
I.getArgOperand(0)->getType()->isPointerTy() &&
3040 I.getType()->isVectorTy() &&
I.onlyReadsMemory()) {
3042 return handleVectorLoadIntrinsic(
I);
3045 if (
I.doesNotAccessMemory())
3046 if (maybeHandleSimpleNomemIntrinsic(
I))
3054 if (handleUnknownIntrinsicUnlogged(
I)) {
3066 setShadow(&
I, getShadow(&
I, 0));
3067 setOrigin(&
I, getOrigin(&
I, 0));
3075 InstrumentLifetimeStart =
false;
3076 LifetimeStartList.push_back(std::make_pair(&
I, AI));
3082 Type *OpType =
Op->getType();
3085 setOrigin(&
I, getOrigin(
Op));
3090 Value *Src =
I.getArgOperand(0);
3096 Constant *IsZeroPoison = cast<Constant>(
I.getOperand(1));
3099 BoolShadow = IRB.
CreateOr(BoolShadow, BoolZeroPoison,
"_mscz_bs");
3102 Value *OutputShadow =
3103 IRB.
CreateSExt(BoolShadow, getShadowTy(Src),
"_mscz_os");
3105 setShadow(&
I, OutputShadow);
3106 setOriginForNaryOp(
I);
3124 void handleVectorConvertIntrinsic(
IntrinsicInst &
I,
int NumUsedElements,
3125 bool HasRoundingMode =
false) {
3127 Value *CopyOp, *ConvertOp;
3129 assert((!HasRoundingMode ||
3130 isa<ConstantInt>(
I.getArgOperand(
I.arg_size() - 1))) &&
3131 "Invalid rounding mode");
3133 switch (
I.arg_size() - HasRoundingMode) {
3135 CopyOp =
I.getArgOperand(0);
3136 ConvertOp =
I.getArgOperand(1);
3139 ConvertOp =
I.getArgOperand(0);
3153 Value *ConvertShadow = getShadow(ConvertOp);
3154 Value *AggShadow =
nullptr;
3157 ConvertShadow, ConstantInt::get(IRB.
getInt32Ty(), 0));
3158 for (
int i = 1; i < NumUsedElements; ++i) {
3160 ConvertShadow, ConstantInt::get(IRB.
getInt32Ty(), i));
3161 AggShadow = IRB.
CreateOr(AggShadow, MoreShadow);
3164 AggShadow = ConvertShadow;
3167 insertShadowCheck(AggShadow, getOrigin(ConvertOp), &
I);
3174 Value *ResultShadow = getShadow(CopyOp);
3175 Type *EltTy = cast<VectorType>(ResultShadow->
getType())->getElementType();
3176 for (
int i = 0; i < NumUsedElements; ++i) {
3178 ResultShadow, ConstantInt::getNullValue(EltTy),
3181 setShadow(&
I, ResultShadow);
3182 setOrigin(&
I, getOrigin(CopyOp));
3184 setShadow(&
I, getCleanShadow(&
I));
3185 setOrigin(&
I, getCleanOrigin());
3193 S = CreateShadowCast(IRB, S, IRB.
getInt64Ty(),
true);
3196 return CreateShadowCast(IRB, S2,
T,
true);
3204 return CreateShadowCast(IRB, S2,
T,
true);
3221 void handleVectorShiftIntrinsic(
IntrinsicInst &
I,
bool Variable) {
3227 Value *S2 = getShadow(&
I, 1);
3228 Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
3229 : Lower64ShadowExtend(IRB, S2, getShadowTy(&
I));
3230 Value *V1 =
I.getOperand(0);
3233 {IRB.CreateBitCast(S1, V1->getType()), V2});
3235 setShadow(&
I, IRB.
CreateOr(Shift, S2Conv));
3236 setOriginForNaryOp(
I);
3240 Type *getMMXVectorTy(
unsigned EltSizeInBits) {
3241 const unsigned X86_MMXSizeInBits = 64;
3242 assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
3243 "Illegal MMX vector element size");
3245 X86_MMXSizeInBits / EltSizeInBits);
3252 case Intrinsic::x86_sse2_packsswb_128:
3253 case Intrinsic::x86_sse2_packuswb_128:
3254 return Intrinsic::x86_sse2_packsswb_128;
3256 case Intrinsic::x86_sse2_packssdw_128:
3257 case Intrinsic::x86_sse41_packusdw:
3258 return Intrinsic::x86_sse2_packssdw_128;
3260 case Intrinsic::x86_avx2_packsswb:
3261 case Intrinsic::x86_avx2_packuswb:
3262 return Intrinsic::x86_avx2_packsswb;
3264 case Intrinsic::x86_avx2_packssdw:
3265 case Intrinsic::x86_avx2_packusdw:
3266 return Intrinsic::x86_avx2_packssdw;
3268 case Intrinsic::x86_mmx_packsswb:
3269 case Intrinsic::x86_mmx_packuswb:
3270 return Intrinsic::x86_mmx_packsswb;
3272 case Intrinsic::x86_mmx_packssdw:
3273 return Intrinsic::x86_mmx_packssdw;
3287 unsigned MMXEltSizeInBits = 0) {
3291 Value *S2 = getShadow(&
I, 1);
3292 assert(
S1->getType()->isVectorTy());
3298 MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits) :
S1->
getType();
3299 if (MMXEltSizeInBits) {
3307 if (MMXEltSizeInBits) {
3313 {}, {S1_ext, S2_ext},
nullptr,
3314 "_msprop_vector_pack");
3315 if (MMXEltSizeInBits)
3318 setOriginForNaryOp(
I);
3322 Constant *createDppMask(
unsigned Width,
unsigned Mask) {
3335 const unsigned Width =
3336 cast<FixedVectorType>(S->
getType())->getNumElements();
3342 Value *DstMaskV = createDppMask(Width, DstMask);
3362 Value *S0 = getShadow(&
I, 0);
3366 const unsigned Width =
3367 cast<FixedVectorType>(S->
getType())->getNumElements();
3368 assert(Width == 2 || Width == 4 || Width == 8);
3370 const unsigned Mask = cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue();
3371 const unsigned SrcMask =
Mask >> 4;
3372 const unsigned DstMask =
Mask & 0xf;
3375 Value *SI1 = findDppPoisonedOutput(IRB, S, SrcMask, DstMask);
3380 SI1, findDppPoisonedOutput(IRB, S, SrcMask << 4, DstMask << 4));
3387 setOriginForNaryOp(
I);
3391 C = CreateAppToShadowCast(IRB,
C);
3405 Value *Sc = getShadow(&
I, 2);
3406 Value *Oc = MS.TrackOrigins ? getOrigin(
C) : nullptr;
3411 C = convertBlendvToSelectMask(IRB,
C);
3412 Sc = convertBlendvToSelectMask(IRB, Sc);
3418 handleSelectLikeInst(
I,
C,
T,
F);
3422 void handleVectorSadIntrinsic(
IntrinsicInst &
I,
bool IsMMX =
false) {
3423 const unsigned SignificantBitsPerResultElement = 16;
3425 unsigned ZeroBitsPerResultElement =
3429 auto *Shadow0 = getShadow(&
I, 0);
3430 auto *Shadow1 = getShadow(&
I, 1);
3435 S = IRB.
CreateLShr(S, ZeroBitsPerResultElement);
3438 setOriginForNaryOp(
I);
3443 unsigned MMXEltSizeInBits = 0) {
3445 MMXEltSizeInBits ? getMMXVectorTy(MMXEltSizeInBits * 2) :
I.
getType();
3447 auto *Shadow0 = getShadow(&
I, 0);
3448 auto *Shadow1 = getShadow(&
I, 1);
3455 setOriginForNaryOp(
I);
3463 Type *ResTy = getShadowTy(&
I);
3464 auto *Shadow0 = getShadow(&
I, 0);
3465 auto *Shadow1 = getShadow(&
I, 1);
3470 setOriginForNaryOp(
I);
3478 auto *Shadow0 = getShadow(&
I, 0);
3479 auto *Shadow1 = getShadow(&
I, 1);
3481 Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&
I));
3483 setOriginForNaryOp(
I);
3492 setOrigin(&
I, getOrigin(&
I, 0));
3500 Value *OperandShadow = getShadow(&
I, 0);
3502 Value *OperandUnsetOrPoison = IRB.
CreateOr(OperandUnsetBits, OperandShadow);
3510 setOrigin(&
I, getOrigin(&
I, 0));
3518 Value *OperandShadow = getShadow(&
I, 0);
3519 Value *OperandSetOrPoison = IRB.
CreateOr(
I.getOperand(0), OperandShadow);
3527 setOrigin(&
I, getOrigin(&
I, 0));
3535 getShadowOriginPtr(
Addr, IRB, Ty,
Align(1),
true).first;
3540 insertShadowCheck(
Addr, &
I);
3551 Value *ShadowPtr, *OriginPtr;
3552 std::tie(ShadowPtr, OriginPtr) =
3553 getShadowOriginPtr(
Addr, IRB, Ty, Alignment,
false);
3556 insertShadowCheck(
Addr, &
I);
3559 Value *Origin = MS.TrackOrigins ? IRB.
CreateLoad(MS.OriginTy, OriginPtr)
3561 insertShadowCheck(Shadow, Origin, &
I);
3569 Value *PassThru =
I.getArgOperand(2);
3572 insertShadowCheck(
Ptr, &
I);
3573 insertShadowCheck(Mask, &
I);
3576 if (!PropagateShadow) {
3577 setShadow(&
I, getCleanShadow(&
I));
3578 setOrigin(&
I, getCleanOrigin());
3582 Type *ShadowTy = getShadowTy(&
I);
3583 Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
3584 auto [ShadowPtr, OriginPtr] =
3585 getShadowOriginPtr(
Ptr, IRB, ElementShadowTy,
Align,
false);
3589 getShadow(PassThru),
"_msmaskedexpload");
3591 setShadow(&
I, Shadow);
3594 setOrigin(&
I, getCleanOrigin());
3599 Value *Values =
I.getArgOperand(0);
3605 insertShadowCheck(
Ptr, &
I);
3606 insertShadowCheck(Mask, &
I);
3609 Value *Shadow = getShadow(Values);
3610 Type *ElementShadowTy =
3611 getShadowTy(cast<VectorType>(Values->
getType())->getElementType());
3612 auto [ShadowPtr, OriginPtrs] =
3613 getShadowOriginPtr(
Ptr, IRB, ElementShadowTy,
Align,
true);
3622 Value *Ptrs =
I.getArgOperand(0);
3623 const Align Alignment(
3624 cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue());
3626 Value *PassThru =
I.getArgOperand(3);
3628 Type *PtrsShadowTy = getShadowTy(Ptrs);
3630 insertShadowCheck(Mask, &
I);
3634 insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &
I);
3637 if (!PropagateShadow) {
3638 setShadow(&
I, getCleanShadow(&
I));
3639 setOrigin(&
I, getCleanOrigin());
3643 Type *ShadowTy = getShadowTy(&
I);
3644 Type *ElementShadowTy = cast<VectorType>(ShadowTy)->getElementType();
3645 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
3646 Ptrs, IRB, ElementShadowTy, Alignment,
false);
3650 getShadow(PassThru),
"_msmaskedgather");
3652 setShadow(&
I, Shadow);
3655 setOrigin(&
I, getCleanOrigin());
3660 Value *Values =
I.getArgOperand(0);
3661 Value *Ptrs =
I.getArgOperand(1);
3662 const Align Alignment(
3663 cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue());
3666 Type *PtrsShadowTy = getShadowTy(Ptrs);
3668 insertShadowCheck(Mask, &
I);
3672 insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &
I);
3675 Value *Shadow = getShadow(Values);
3676 Type *ElementShadowTy =
3677 getShadowTy(cast<VectorType>(Values->
getType())->getElementType());
3678 auto [ShadowPtrs, OriginPtrs] = getShadowOriginPtr(
3679 Ptrs, IRB, ElementShadowTy, Alignment,
true);
3688 Value *
V =
I.getArgOperand(0);
3690 const Align Alignment(
3691 cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue());
3693 Value *Shadow = getShadow(V);
3696 insertShadowCheck(
Ptr, &
I);
3697 insertShadowCheck(Mask, &
I);
3702 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
3703 Ptr, IRB, Shadow->
getType(), Alignment,
true);
3707 if (!MS.TrackOrigins)
3710 auto &
DL =
F.getDataLayout();
3711 paintOrigin(IRB, getOrigin(V), OriginPtr,
3719 const Align Alignment(
3720 cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue());
3722 Value *PassThru =
I.getArgOperand(3);
3725 insertShadowCheck(
Ptr, &
I);
3726 insertShadowCheck(Mask, &
I);
3729 if (!PropagateShadow) {
3730 setShadow(&
I, getCleanShadow(&
I));
3731 setOrigin(&
I, getCleanOrigin());
3735 Type *ShadowTy = getShadowTy(&
I);
3736 Value *ShadowPtr, *OriginPtr;
3737 std::tie(ShadowPtr, OriginPtr) =
3738 getShadowOriginPtr(
Ptr, IRB, ShadowTy, Alignment,
false);
3740 getShadow(PassThru),
"_msmaskedld"));
3742 if (!MS.TrackOrigins)
3749 Value *NotNull = convertToBool(MaskedPassThruShadow, IRB,
"_mscmp");
3754 setOrigin(&
I, Origin);
3764 Type *ShadowTy = getShadowTy(&
I);
3767 Value *SMask = getShadow(&
I, 1);
3772 {getShadow(&I, 0), I.getOperand(1)});
3775 setOriginForNaryOp(
I);
3780 for (
unsigned X = OddElements ? 1 : 0;
X < Width;
X += 2) {
3797 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getNumElements();
3798 assert(isa<ConstantInt>(
I.getArgOperand(2)) &&
3799 "pclmul 3rd operand must be a constant");
3800 unsigned Imm = cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue();
3802 getPclmulMask(Width, Imm & 0x01));
3804 getPclmulMask(Width, Imm & 0x10));
3805 ShadowAndOriginCombiner SOC(
this, IRB);
3806 SOC.Add(Shuf0, getOrigin(&
I, 0));
3807 SOC.Add(Shuf1, getOrigin(&
I, 1));
3815 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getNumElements();
3817 Value *Second = getShadow(&
I, 1);
3820 Mask.push_back(Width);
3821 for (
unsigned i = 1; i < Width; i++)
3825 setShadow(&
I, Shadow);
3826 setOriginForNaryOp(
I);
3831 Value *Shadow0 = getShadow(&
I, 0);
3832 Value *Shadow1 = getShadow(&
I, 1);
3838 setShadow(&
I, Shadow);
3839 setOriginForNaryOp(
I);
3845 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getNumElements();
3847 Value *Second = getShadow(&
I, 1);
3851 Mask.push_back(Width);
3852 for (
unsigned i = 1; i < Width; i++)
3856 setShadow(&
I, Shadow);
3857 setOriginForNaryOp(
I);
3864 assert(
I.getArgOperand(0)->getType() ==
I.getType());
3866 assert(isa<ConstantInt>(
I.getArgOperand(1)));
3869 ShadowAndOriginCombiner
SC(
this, IRB);
3870 SC.Add(
I.getArgOperand(0));
3878 assert(
I.getType()->isIntOrIntVectorTy());
3879 assert(
I.getArgOperand(0)->getType() ==
I.getType());
3883 setShadow(&
I, getShadow(&
I, 0));
3884 setOrigin(&
I, getOrigin(&
I, 0));
3889 Value *Shadow = getShadow(&
I, 0);
3890 setShadow(&
I, IRB.
CreateICmpNE(Shadow, getCleanShadow(Shadow)));
3891 setOrigin(&
I, getOrigin(&
I, 0));
3896 Value *Shadow0 = getShadow(&
I, 0);
3897 Value *Shadow1 = getShadow(&
I, 1);
3900 IRB.
CreateICmpNE(ShadowElt0, getCleanShadow(ShadowElt0));
3906 setShadow(&
I, Shadow);
3907 setOriginForNaryOp(
I);
3924 return handleIntrinsicByApplyingToShadow(
I, 0);
3941 void handleNEONVectorStoreIntrinsic(
IntrinsicInst &
I,
bool useLane) {
3945 int numArgOperands =
I.arg_size();
3948 assert(numArgOperands >= 1);
3949 Value *
Addr =
I.getArgOperand(numArgOperands - 1);
3951 int skipTrailingOperands = 1;
3954 insertShadowCheck(
Addr, &
I);
3958 skipTrailingOperands++;
3959 assert(numArgOperands >=
static_cast<int>(skipTrailingOperands));
3961 I.getArgOperand(numArgOperands - skipTrailingOperands)->getType()));
3966 for (
int i = 0; i < numArgOperands - skipTrailingOperands; i++) {
3967 assert(isa<FixedVectorType>(
I.getArgOperand(i)->getType()));
3968 Value *Shadow = getShadow(&
I, i);
3969 ShadowArgs.
append(1, Shadow);
3984 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getElementType(),
3985 cast<FixedVectorType>(
I.getArgOperand(0)->getType())->getNumElements() *
3986 (numArgOperands - skipTrailingOperands));
3987 Type *OutputShadowTy = getShadowTy(OutputVectorTy);
3991 I.getArgOperand(numArgOperands - skipTrailingOperands));
3993 Value *OutputShadowPtr, *OutputOriginPtr;
3995 std::tie(OutputShadowPtr, OutputOriginPtr) = getShadowOriginPtr(
3996 Addr, IRB, OutputShadowTy,
Align(1),
true);
3997 ShadowArgs.
append(1, OutputShadowPtr);
4003 if (MS.TrackOrigins) {
4011 OriginCombiner
OC(
this, IRB);
4012 for (
int i = 0; i < numArgOperands - skipTrailingOperands; i++)
4013 OC.Add(
I.getArgOperand(i));
4016 OC.DoneAndStoreOrigin(
DL.getTypeStoreSize(OutputVectorTy),
4040 unsigned int trailingVerbatimArgs) {
4043 assert(trailingVerbatimArgs <
I.arg_size());
4047 for (
unsigned int i = 0; i <
I.arg_size() - trailingVerbatimArgs; i++) {
4048 Value *Shadow = getShadow(&
I, i);
4056 for (
unsigned int i =
I.arg_size() - trailingVerbatimArgs; i <
I.arg_size();
4058 Value *Arg =
I.getArgOperand(i);
4064 Value *CombinedShadow = CI;
4067 for (
unsigned int i =
I.arg_size() - trailingVerbatimArgs; i <
I.arg_size();
4070 CreateShadowCast(IRB, getShadow(&
I, i), CombinedShadow->
getType());
4071 CombinedShadow = IRB.
CreateOr(Shadow, CombinedShadow,
"_msprop");
4076 setOriginForNaryOp(
I);
4085 switch (
I.getIntrinsicID()) {
4086 case Intrinsic::uadd_with_overflow:
4087 case Intrinsic::sadd_with_overflow:
4088 case Intrinsic::usub_with_overflow:
4089 case Intrinsic::ssub_with_overflow:
4090 case Intrinsic::umul_with_overflow:
4091 case Intrinsic::smul_with_overflow:
4092 handleArithmeticWithOverflow(
I);
4094 case Intrinsic::abs:
4095 handleAbsIntrinsic(
I);
4097 case Intrinsic::is_fpclass:
4100 case Intrinsic::lifetime_start:
4101 handleLifetimeStart(
I);
4103 case Intrinsic::launder_invariant_group:
4104 case Intrinsic::strip_invariant_group:
4105 handleInvariantGroup(
I);
4107 case Intrinsic::bswap:
4110 case Intrinsic::ctlz:
4111 case Intrinsic::cttz:
4112 handleCountZeroes(
I);
4114 case Intrinsic::masked_compressstore:
4115 handleMaskedCompressStore(
I);
4117 case Intrinsic::masked_expandload:
4118 handleMaskedExpandLoad(
I);
4120 case Intrinsic::masked_gather:
4121 handleMaskedGather(
I);
4123 case Intrinsic::masked_scatter:
4124 handleMaskedScatter(
I);
4126 case Intrinsic::masked_store:
4127 handleMaskedStore(
I);
4129 case Intrinsic::masked_load:
4130 handleMaskedLoad(
I);
4132 case Intrinsic::vector_reduce_and:
4133 handleVectorReduceAndIntrinsic(
I);
4135 case Intrinsic::vector_reduce_or:
4136 handleVectorReduceOrIntrinsic(
I);
4138 case Intrinsic::vector_reduce_add:
4139 case Intrinsic::vector_reduce_xor:
4140 case Intrinsic::vector_reduce_mul:
4141 handleVectorReduceIntrinsic(
I);
4143 case Intrinsic::x86_sse_stmxcsr:
4146 case Intrinsic::x86_sse_ldmxcsr:
4149 case Intrinsic::x86_avx512_vcvtsd2usi64:
4150 case Intrinsic::x86_avx512_vcvtsd2usi32:
4151 case Intrinsic::x86_avx512_vcvtss2usi64:
4152 case Intrinsic::x86_avx512_vcvtss2usi32:
4153 case Intrinsic::x86_avx512_cvttss2usi64:
4154 case Intrinsic::x86_avx512_cvttss2usi:
4155 case Intrinsic::x86_avx512_cvttsd2usi64:
4156 case Intrinsic::x86_avx512_cvttsd2usi:
4157 case Intrinsic::x86_avx512_cvtusi2ss:
4158 case Intrinsic::x86_avx512_cvtusi642sd:
4159 case Intrinsic::x86_avx512_cvtusi642ss:
4160 handleVectorConvertIntrinsic(
I, 1,
true);
4162 case Intrinsic::x86_sse2_cvtsd2si64:
4163 case Intrinsic::x86_sse2_cvtsd2si:
4164 case Intrinsic::x86_sse2_cvtsd2ss:
4165 case Intrinsic::x86_sse2_cvttsd2si64:
4166 case Intrinsic::x86_sse2_cvttsd2si:
4167 case Intrinsic::x86_sse_cvtss2si64:
4168 case Intrinsic::x86_sse_cvtss2si:
4169 case Intrinsic::x86_sse_cvttss2si64:
4170 case Intrinsic::x86_sse_cvttss2si:
4171 handleVectorConvertIntrinsic(
I, 1);
4173 case Intrinsic::x86_sse_cvtps2pi:
4174 case Intrinsic::x86_sse_cvttps2pi:
4175 handleVectorConvertIntrinsic(
I, 2);
4178 case Intrinsic::x86_avx512_psll_w_512:
4179 case Intrinsic::x86_avx512_psll_d_512:
4180 case Intrinsic::x86_avx512_psll_q_512:
4181 case Intrinsic::x86_avx512_pslli_w_512:
4182 case Intrinsic::x86_avx512_pslli_d_512:
4183 case Intrinsic::x86_avx512_pslli_q_512:
4184 case Intrinsic::x86_avx512_psrl_w_512:
4185 case Intrinsic::x86_avx512_psrl_d_512:
4186 case Intrinsic::x86_avx512_psrl_q_512:
4187 case Intrinsic::x86_avx512_psra_w_512:
4188 case Intrinsic::x86_avx512_psra_d_512:
4189 case Intrinsic::x86_avx512_psra_q_512:
4190 case Intrinsic::x86_avx512_psrli_w_512:
4191 case Intrinsic::x86_avx512_psrli_d_512:
4192 case Intrinsic::x86_avx512_psrli_q_512:
4193 case Intrinsic::x86_avx512_psrai_w_512:
4194 case Intrinsic::x86_avx512_psrai_d_512:
4195 case Intrinsic::x86_avx512_psrai_q_512:
4196 case Intrinsic::x86_avx512_psra_q_256:
4197 case Intrinsic::x86_avx512_psra_q_128:
4198 case Intrinsic::x86_avx512_psrai_q_256:
4199 case Intrinsic::x86_avx512_psrai_q_128:
4200 case Intrinsic::x86_avx2_psll_w:
4201 case Intrinsic::x86_avx2_psll_d:
4202 case Intrinsic::x86_avx2_psll_q:
4203 case Intrinsic::x86_avx2_pslli_w:
4204 case Intrinsic::x86_avx2_pslli_d:
4205 case Intrinsic::x86_avx2_pslli_q:
4206 case Intrinsic::x86_avx2_psrl_w:
4207 case Intrinsic::x86_avx2_psrl_d:
4208 case Intrinsic::x86_avx2_psrl_q:
4209 case Intrinsic::x86_avx2_psra_w:
4210 case Intrinsic::x86_avx2_psra_d:
4211 case Intrinsic::x86_avx2_psrli_w:
4212 case Intrinsic::x86_avx2_psrli_d:
4213 case Intrinsic::x86_avx2_psrli_q:
4214 case Intrinsic::x86_avx2_psrai_w:
4215 case Intrinsic::x86_avx2_psrai_d:
4216 case Intrinsic::x86_sse2_psll_w:
4217 case Intrinsic::x86_sse2_psll_d:
4218 case Intrinsic::x86_sse2_psll_q:
4219 case Intrinsic::x86_sse2_pslli_w:
4220 case Intrinsic::x86_sse2_pslli_d:
4221 case Intrinsic::x86_sse2_pslli_q:
4222 case Intrinsic::x86_sse2_psrl_w:
4223 case Intrinsic::x86_sse2_psrl_d:
4224 case Intrinsic::x86_sse2_psrl_q:
4225 case Intrinsic::x86_sse2_psra_w:
4226 case Intrinsic::x86_sse2_psra_d:
4227 case Intrinsic::x86_sse2_psrli_w:
4228 case Intrinsic::x86_sse2_psrli_d:
4229 case Intrinsic::x86_sse2_psrli_q:
4230 case Intrinsic::x86_sse2_psrai_w:
4231 case Intrinsic::x86_sse2_psrai_d:
4232 case Intrinsic::x86_mmx_psll_w:
4233 case Intrinsic::x86_mmx_psll_d:
4234 case Intrinsic::x86_mmx_psll_q:
4235 case Intrinsic::x86_mmx_pslli_w:
4236 case Intrinsic::x86_mmx_pslli_d:
4237 case Intrinsic::x86_mmx_pslli_q:
4238 case Intrinsic::x86_mmx_psrl_w:
4239 case Intrinsic::x86_mmx_psrl_d:
4240 case Intrinsic::x86_mmx_psrl_q:
4241 case Intrinsic::x86_mmx_psra_w:
4242 case Intrinsic::x86_mmx_psra_d:
4243 case Intrinsic::x86_mmx_psrli_w:
4244 case Intrinsic::x86_mmx_psrli_d:
4245 case Intrinsic::x86_mmx_psrli_q:
4246 case Intrinsic::x86_mmx_psrai_w:
4247 case Intrinsic::x86_mmx_psrai_d:
4248 case Intrinsic::aarch64_neon_rshrn:
4249 case Intrinsic::aarch64_neon_sqrshl:
4250 case Intrinsic::aarch64_neon_sqrshrn:
4251 case Intrinsic::aarch64_neon_sqrshrun:
4252 case Intrinsic::aarch64_neon_sqshl:
4253 case Intrinsic::aarch64_neon_sqshlu:
4254 case Intrinsic::aarch64_neon_sqshrn:
4255 case Intrinsic::aarch64_neon_sqshrun:
4256 case Intrinsic::aarch64_neon_srshl:
4257 case Intrinsic::aarch64_neon_sshl:
4258 case Intrinsic::aarch64_neon_uqrshl:
4259 case Intrinsic::aarch64_neon_uqrshrn:
4260 case Intrinsic::aarch64_neon_uqshl:
4261 case Intrinsic::aarch64_neon_uqshrn:
4262 case Intrinsic::aarch64_neon_urshl:
4263 case Intrinsic::aarch64_neon_ushl:
4265 handleVectorShiftIntrinsic(
I,
false);
4267 case Intrinsic::x86_avx2_psllv_d:
4268 case Intrinsic::x86_avx2_psllv_d_256:
4269 case Intrinsic::x86_avx512_psllv_d_512:
4270 case Intrinsic::x86_avx2_psllv_q:
4271 case Intrinsic::x86_avx2_psllv_q_256:
4272 case Intrinsic::x86_avx512_psllv_q_512:
4273 case Intrinsic::x86_avx2_psrlv_d:
4274 case Intrinsic::x86_avx2_psrlv_d_256:
4275 case Intrinsic::x86_avx512_psrlv_d_512:
4276 case Intrinsic::x86_avx2_psrlv_q:
4277 case Intrinsic::x86_avx2_psrlv_q_256:
4278 case Intrinsic::x86_avx512_psrlv_q_512:
4279 case Intrinsic::x86_avx2_psrav_d:
4280 case Intrinsic::x86_avx2_psrav_d_256:
4281 case Intrinsic::x86_avx512_psrav_d_512:
4282 case Intrinsic::x86_avx512_psrav_q_128:
4283 case Intrinsic::x86_avx512_psrav_q_256:
4284 case Intrinsic::x86_avx512_psrav_q_512:
4285 handleVectorShiftIntrinsic(
I,
true);
4288 case Intrinsic::x86_sse2_packsswb_128:
4289 case Intrinsic::x86_sse2_packssdw_128:
4290 case Intrinsic::x86_sse2_packuswb_128:
4291 case Intrinsic::x86_sse41_packusdw:
4292 case Intrinsic::x86_avx2_packsswb:
4293 case Intrinsic::x86_avx2_packssdw:
4294 case Intrinsic::x86_avx2_packuswb:
4295 case Intrinsic::x86_avx2_packusdw:
4296 handleVectorPackIntrinsic(
I);
4299 case Intrinsic::x86_sse41_pblendvb:
4300 case Intrinsic::x86_sse41_blendvpd:
4301 case Intrinsic::x86_sse41_blendvps:
4302 case Intrinsic::x86_avx_blendv_pd_256:
4303 case Intrinsic::x86_avx_blendv_ps_256:
4304 case Intrinsic::x86_avx2_pblendvb:
4305 handleBlendvIntrinsic(
I);
4308 case Intrinsic::x86_avx_dp_ps_256:
4309 case Intrinsic::x86_sse41_dppd:
4310 case Intrinsic::x86_sse41_dpps:
4311 handleDppIntrinsic(
I);
4314 case Intrinsic::x86_mmx_packsswb:
4315 case Intrinsic::x86_mmx_packuswb:
4316 handleVectorPackIntrinsic(
I, 16);
4319 case Intrinsic::x86_mmx_packssdw:
4320 handleVectorPackIntrinsic(
I, 32);
4323 case Intrinsic::x86_mmx_psad_bw:
4324 handleVectorSadIntrinsic(
I,
true);
4326 case Intrinsic::x86_sse2_psad_bw:
4327 case Intrinsic::x86_avx2_psad_bw:
4328 handleVectorSadIntrinsic(
I);
4331 case Intrinsic::x86_sse2_pmadd_wd:
4332 case Intrinsic::x86_avx2_pmadd_wd:
4333 case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
4334 case Intrinsic::x86_avx2_pmadd_ub_sw:
4335 handleVectorPmaddIntrinsic(
I);
4338 case Intrinsic::x86_ssse3_pmadd_ub_sw:
4339 handleVectorPmaddIntrinsic(
I, 8);
4342 case Intrinsic::x86_mmx_pmadd_wd:
4343 handleVectorPmaddIntrinsic(
I, 16);
4346 case Intrinsic::x86_sse_cmp_ss:
4347 case Intrinsic::x86_sse2_cmp_sd:
4348 case Intrinsic::x86_sse_comieq_ss:
4349 case Intrinsic::x86_sse_comilt_ss:
4350 case Intrinsic::x86_sse_comile_ss:
4351 case Intrinsic::x86_sse_comigt_ss:
4352 case Intrinsic::x86_sse_comige_ss:
4353 case Intrinsic::x86_sse_comineq_ss:
4354 case Intrinsic::x86_sse_ucomieq_ss:
4355 case Intrinsic::x86_sse_ucomilt_ss:
4356 case Intrinsic::x86_sse_ucomile_ss:
4357 case Intrinsic::x86_sse_ucomigt_ss:
4358 case Intrinsic::x86_sse_ucomige_ss:
4359 case Intrinsic::x86_sse_ucomineq_ss:
4360 case Intrinsic::x86_sse2_comieq_sd:
4361 case Intrinsic::x86_sse2_comilt_sd:
4362 case Intrinsic::x86_sse2_comile_sd:
4363 case Intrinsic::x86_sse2_comigt_sd:
4364 case Intrinsic::x86_sse2_comige_sd:
4365 case Intrinsic::x86_sse2_comineq_sd:
4366 case Intrinsic::x86_sse2_ucomieq_sd:
4367 case Intrinsic::x86_sse2_ucomilt_sd:
4368 case Intrinsic::x86_sse2_ucomile_sd:
4369 case Intrinsic::x86_sse2_ucomigt_sd:
4370 case Intrinsic::x86_sse2_ucomige_sd:
4371 case Intrinsic::x86_sse2_ucomineq_sd:
4372 handleVectorCompareScalarIntrinsic(
I);
4375 case Intrinsic::x86_avx_cmp_pd_256:
4376 case Intrinsic::x86_avx_cmp_ps_256:
4377 case Intrinsic::x86_sse2_cmp_pd:
4378 case Intrinsic::x86_sse_cmp_ps:
4379 handleVectorComparePackedIntrinsic(
I);
4382 case Intrinsic::x86_bmi_bextr_32:
4383 case Intrinsic::x86_bmi_bextr_64:
4384 case Intrinsic::x86_bmi_bzhi_32:
4385 case Intrinsic::x86_bmi_bzhi_64:
4386 case Intrinsic::x86_bmi_pdep_32:
4387 case Intrinsic::x86_bmi_pdep_64:
4388 case Intrinsic::x86_bmi_pext_32:
4389 case Intrinsic::x86_bmi_pext_64:
4390 handleBmiIntrinsic(
I);
4393 case Intrinsic::x86_pclmulqdq:
4394 case Intrinsic::x86_pclmulqdq_256:
4395 case Intrinsic::x86_pclmulqdq_512:
4396 handlePclmulIntrinsic(
I);
4399 case Intrinsic::x86_avx_round_pd_256:
4400 case Intrinsic::x86_avx_round_ps_256:
4401 case Intrinsic::x86_sse41_round_pd:
4402 case Intrinsic::x86_sse41_round_ps:
4403 handleRoundPdPsIntrinsic(
I);
4406 case Intrinsic::x86_sse41_round_sd:
4407 case Intrinsic::x86_sse41_round_ss:
4408 handleUnarySdSsIntrinsic(
I);
4411 case Intrinsic::x86_sse2_max_sd:
4412 case Intrinsic::x86_sse_max_ss:
4413 case Intrinsic::x86_sse2_min_sd:
4414 case Intrinsic::x86_sse_min_ss:
4415 handleBinarySdSsIntrinsic(
I);
4418 case Intrinsic::x86_avx_vtestc_pd:
4419 case Intrinsic::x86_avx_vtestc_pd_256:
4420 case Intrinsic::x86_avx_vtestc_ps:
4421 case Intrinsic::x86_avx_vtestc_ps_256:
4422 case Intrinsic::x86_avx_vtestnzc_pd:
4423 case Intrinsic::x86_avx_vtestnzc_pd_256:
4424 case Intrinsic::x86_avx_vtestnzc_ps:
4425 case Intrinsic::x86_avx_vtestnzc_ps_256:
4426 case Intrinsic::x86_avx_vtestz_pd:
4427 case Intrinsic::x86_avx_vtestz_pd_256:
4428 case Intrinsic::x86_avx_vtestz_ps:
4429 case Intrinsic::x86_avx_vtestz_ps_256:
4430 case Intrinsic::x86_avx_ptestc_256:
4431 case Intrinsic::x86_avx_ptestnzc_256:
4432 case Intrinsic::x86_avx_ptestz_256:
4433 case Intrinsic::x86_sse41_ptestc:
4434 case Intrinsic::x86_sse41_ptestnzc:
4435 case Intrinsic::x86_sse41_ptestz:
4436 handleVtestIntrinsic(
I);
4439 case Intrinsic::x86_sse3_hadd_ps:
4440 case Intrinsic::x86_sse3_hadd_pd:
4441 case Intrinsic::x86_ssse3_phadd_d:
4442 case Intrinsic::x86_ssse3_phadd_d_128:
4443 case Intrinsic::x86_ssse3_phadd_w:
4444 case Intrinsic::x86_ssse3_phadd_w_128:
4445 case Intrinsic::x86_ssse3_phadd_sw:
4446 case Intrinsic::x86_ssse3_phadd_sw_128:
4447 case Intrinsic::x86_avx_hadd_pd_256:
4448 case Intrinsic::x86_avx_hadd_ps_256:
4449 case Intrinsic::x86_avx2_phadd_d:
4450 case Intrinsic::x86_avx2_phadd_w:
4451 case Intrinsic::x86_avx2_phadd_sw:
4452 case Intrinsic::x86_sse3_hsub_ps:
4453 case Intrinsic::x86_sse3_hsub_pd:
4454 case Intrinsic::x86_ssse3_phsub_d:
4455 case Intrinsic::x86_ssse3_phsub_d_128:
4456 case Intrinsic::x86_ssse3_phsub_w:
4457 case Intrinsic::x86_ssse3_phsub_w_128:
4458 case Intrinsic::x86_ssse3_phsub_sw:
4459 case Intrinsic::x86_ssse3_phsub_sw_128:
4460 case Intrinsic::x86_avx_hsub_pd_256:
4461 case Intrinsic::x86_avx_hsub_ps_256:
4462 case Intrinsic::x86_avx2_phsub_d:
4463 case Intrinsic::x86_avx2_phsub_w:
4464 case Intrinsic::x86_avx2_phsub_sw: {
4465 handleAVXHorizontalAddSubIntrinsic(
I);
4469 case Intrinsic::fshl:
4470 case Intrinsic::fshr:
4471 handleFunnelShift(
I);
4474 case Intrinsic::is_constant:
4476 setShadow(&
I, getCleanShadow(&
I));
4477 setOrigin(&
I, getCleanOrigin());
4480 case Intrinsic::aarch64_neon_st1x2:
4481 case Intrinsic::aarch64_neon_st1x3:
4482 case Intrinsic::aarch64_neon_st1x4:
4483 case Intrinsic::aarch64_neon_st2:
4484 case Intrinsic::aarch64_neon_st3:
4485 case Intrinsic::aarch64_neon_st4: {
4486 handleNEONVectorStoreIntrinsic(
I,
false);
4490 case Intrinsic::aarch64_neon_st2lane:
4491 case Intrinsic::aarch64_neon_st3lane:
4492 case Intrinsic::aarch64_neon_st4lane: {
4493 handleNEONVectorStoreIntrinsic(
I,
true);
4506 case Intrinsic::aarch64_neon_tbl1:
4507 case Intrinsic::aarch64_neon_tbl2:
4508 case Intrinsic::aarch64_neon_tbl3:
4509 case Intrinsic::aarch64_neon_tbl4:
4510 case Intrinsic::aarch64_neon_tbx1:
4511 case Intrinsic::aarch64_neon_tbx2:
4512 case Intrinsic::aarch64_neon_tbx3:
4513 case Intrinsic::aarch64_neon_tbx4: {
4515 handleIntrinsicByApplyingToShadow(
I, 1);
4519 case Intrinsic::aarch64_neon_fmulx:
4520 case Intrinsic::aarch64_neon_pmul:
4521 case Intrinsic::aarch64_neon_pmull:
4522 case Intrinsic::aarch64_neon_smull:
4523 case Intrinsic::aarch64_neon_pmull64:
4524 case Intrinsic::aarch64_neon_umull: {
4525 handleNEONVectorMultiplyIntrinsic(
I);
4530 if (!handleUnknownIntrinsic(
I))
4531 visitInstruction(
I);
4536 void visitLibAtomicLoad(
CallBase &CB) {
4538 assert(isa<CallInst>(CB));
4547 Value *NewOrdering =
4551 NextNodeIRBuilder NextIRB(&CB);
4552 Value *SrcShadowPtr, *SrcOriginPtr;
4553 std::tie(SrcShadowPtr, SrcOriginPtr) =
4554 getShadowOriginPtr(SrcPtr, NextIRB, NextIRB.getInt8Ty(),
Align(1),
4556 Value *DstShadowPtr =
4557 getShadowOriginPtr(DstPtr, NextIRB, NextIRB.getInt8Ty(),
Align(1),
4561 NextIRB.CreateMemCpy(DstShadowPtr,
Align(1), SrcShadowPtr,
Align(1),
Size);
4562 if (MS.TrackOrigins) {
4563 Value *SrcOrigin = NextIRB.CreateAlignedLoad(MS.OriginTy, SrcOriginPtr,
4565 Value *NewOrigin = updateOrigin(SrcOrigin, NextIRB);
4566 NextIRB.CreateCall(MS.MsanSetOriginFn, {DstPtr, Size, NewOrigin});
4570 void visitLibAtomicStore(
CallBase &CB) {
4577 Value *NewOrdering =
4581 Value *DstShadowPtr =
4599 visitAsmInstruction(CB);
4601 visitInstruction(CB);
4610 case LibFunc_atomic_load:
4611 if (!isa<CallInst>(CB)) {
4612 llvm::errs() <<
"MSAN -- cannot instrument invoke of libatomic load."
4616 visitLibAtomicLoad(CB);
4618 case LibFunc_atomic_store:
4619 visitLibAtomicStore(CB);
4626 if (
auto *Call = dyn_cast<CallInst>(&CB)) {
4627 assert(!isa<IntrinsicInst>(Call) &&
"intrinsics are handled elsewhere");
4635 B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
4637 Call->removeFnAttrs(
B);
4639 Func->removeFnAttrs(
B);
4645 bool MayCheckCall = MS.EagerChecks;
4649 MayCheckCall &= !
Func->getName().starts_with(
"__sanitizer_unaligned_");
4652 unsigned ArgOffset = 0;
4655 if (!
A->getType()->isSized()) {
4656 LLVM_DEBUG(
dbgs() <<
"Arg " << i <<
" is not sized: " << CB <<
"\n");
4660 if (
A->getType()->isScalableTy()) {
4661 LLVM_DEBUG(
dbgs() <<
"Arg " << i <<
" is vscale: " << CB <<
"\n");
4663 insertShadowCheck(
A, &CB);
4672 bool EagerCheck = MayCheckCall && !ByVal && NoUndef;
4675 insertShadowCheck(
A, &CB);
4676 Size =
DL.getTypeAllocSize(
A->getType());
4682 Value *ArgShadow = getShadow(
A);
4683 Value *ArgShadowBase = getShadowPtrForArgument(IRB, ArgOffset);
4685 <<
" Shadow: " << *ArgShadow <<
"\n");
4689 assert(
A->getType()->isPointerTy() &&
4690 "ByVal argument is not a pointer!");
4698 Value *AShadowPtr, *AOriginPtr;
4699 std::tie(AShadowPtr, AOriginPtr) =
4700 getShadowOriginPtr(
A, IRB, IRB.
getInt8Ty(), Alignment,
4702 if (!PropagateShadow) {
4709 if (MS.TrackOrigins) {
4710 Value *ArgOriginBase = getOriginPtrForArgument(IRB, ArgOffset);
4724 Size =
DL.getTypeAllocSize(
A->getType());
4729 Constant *Cst = dyn_cast<Constant>(ArgShadow);
4730 if (MS.TrackOrigins && !(Cst && Cst->
isNullValue())) {
4732 getOriginPtrForArgument(IRB, ArgOffset));
4736 assert(Store !=
nullptr);
4745 if (FT->isVarArg()) {
4746 VAHelper->visitCallBase(CB, IRB);
4753 if (isa<CallInst>(CB) && cast<CallInst>(CB).isMustTailCall())
4756 if (MayCheckCall && CB.
hasRetAttr(Attribute::NoUndef)) {
4757 setShadow(&CB, getCleanShadow(&CB));
4758 setOrigin(&CB, getCleanOrigin());
4764 Value *
Base = getShadowPtrForRetval(IRBBefore);
4765 IRBBefore.CreateAlignedStore(getCleanShadow(&CB),
Base,
4768 if (isa<CallInst>(CB)) {
4772 BasicBlock *NormalDest = cast<InvokeInst>(CB).getNormalDest();
4777 setShadow(&CB, getCleanShadow(&CB));
4778 setOrigin(&CB, getCleanOrigin());
4785 "Could not find insertion point for retval shadow load");
4788 Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
4791 setShadow(&CB, RetvalShadow);
4792 if (MS.TrackOrigins)
4793 setOrigin(&CB, IRBAfter.CreateLoad(MS.OriginTy, getOriginPtrForRetval()));
4797 if (
auto *
I = dyn_cast<BitCastInst>(RetVal)) {
4798 RetVal =
I->getOperand(0);
4800 if (
auto *
I = dyn_cast<CallInst>(RetVal)) {
4801 return I->isMustTailCall();
4808 Value *RetVal =
I.getReturnValue();
4814 Value *ShadowPtr = getShadowPtrForRetval(IRB);
4815 bool HasNoUndef =
F.hasRetAttribute(Attribute::NoUndef);
4816 bool StoreShadow = !(MS.EagerChecks && HasNoUndef);
4819 bool EagerCheck = (MS.EagerChecks && HasNoUndef) || (
F.getName() ==
"main");
4821 Value *Shadow = getShadow(RetVal);
4822 bool StoreOrigin =
true;
4824 insertShadowCheck(RetVal, &
I);
4825 Shadow = getCleanShadow(RetVal);
4826 StoreOrigin =
false;
4833 if (MS.TrackOrigins && StoreOrigin)
4834 IRB.
CreateStore(getOrigin(RetVal), getOriginPtrForRetval());
4840 if (!PropagateShadow) {
4841 setShadow(&
I, getCleanShadow(&
I));
4842 setOrigin(&
I, getCleanOrigin());
4846 ShadowPHINodes.push_back(&
I);
4847 setShadow(&
I, IRB.
CreatePHI(getShadowTy(&
I),
I.getNumIncomingValues(),
4849 if (MS.TrackOrigins)
4851 &
I, IRB.
CreatePHI(MS.OriginTy,
I.getNumIncomingValues(),
"_msphi_o"));
4868 IRB.
CreateCall(MS.MsanPoisonStackFn, {&I, Len});
4870 Value *ShadowBase, *OriginBase;
4871 std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
4878 if (PoisonStack && MS.TrackOrigins) {
4879 Value *Idptr = getLocalVarIdptr(
I);
4881 Value *Descr = getLocalVarDescription(
I);
4882 IRB.
CreateCall(MS.MsanSetAllocaOriginWithDescriptionFn,
4883 {&I, Len, Idptr, Descr});
4885 IRB.
CreateCall(MS.MsanSetAllocaOriginNoDescriptionFn, {&I, Len, Idptr});
4891 Value *Descr = getLocalVarDescription(
I);
4893 IRB.
CreateCall(MS.MsanPoisonAllocaFn, {&I, Len, Descr});
4895 IRB.
CreateCall(MS.MsanUnpoisonAllocaFn, {&I, Len});
4902 NextNodeIRBuilder IRB(InsPoint);
4904 TypeSize TS =
DL.getTypeAllocSize(
I.getAllocatedType());
4906 if (
I.isArrayAllocation())
4910 if (MS.CompileKernel)
4911 poisonAllocaKmsan(
I, IRB, Len);
4913 poisonAllocaUserspace(
I, IRB, Len);
4917 setShadow(&
I, getCleanShadow(&
I));
4918 setOrigin(&
I, getCleanOrigin());
4930 handleSelectLikeInst(
I,
B,
C,
D);
4936 Value *Sb = getShadow(
B);
4937 Value *Sc = getShadow(
C);
4938 Value *Sd = getShadow(
D);
4940 Value *Ob = MS.TrackOrigins ? getOrigin(
B) : nullptr;
4941 Value *Oc = MS.TrackOrigins ? getOrigin(
C) : nullptr;
4942 Value *Od = MS.TrackOrigins ? getOrigin(
D) : nullptr;
4947 if (
I.getType()->isAggregateType()) {
4951 Sa1 = getPoisonedShadow(getShadowTy(
I.getType()));
4959 C = CreateAppToShadowCast(IRB,
C);
4960 D = CreateAppToShadowCast(IRB,
D);
4967 if (MS.TrackOrigins) {
4970 if (
B->getType()->isVectorTy()) {
4971 B = convertToBool(
B, IRB);
4972 Sb = convertToBool(Sb, IRB);
4983 setShadow(&
I, getCleanShadow(&
I));
4984 setOrigin(&
I, getCleanOrigin());
4988 setShadow(&
I, getCleanShadow(&
I));
4989 setOrigin(&
I, getCleanOrigin());
4993 setShadow(&
I, getCleanShadow(&
I));
4994 setOrigin(&
I, getCleanOrigin());
5001 Value *Agg =
I.getAggregateOperand();
5003 Value *AggShadow = getShadow(Agg);
5007 setShadow(&
I, ResShadow);
5008 setOriginForNaryOp(
I);
5014 Value *AggShadow = getShadow(
I.getAggregateOperand());
5015 Value *InsShadow = getShadow(
I.getInsertedValueOperand());
5021 setOriginForNaryOp(
I);
5025 if (
CallInst *CI = dyn_cast<CallInst>(&
I)) {
5028 errs() <<
"ZZZ " <<
I.getOpcodeName() <<
"\n";
5030 errs() <<
"QQQ " <<
I <<
"\n";
5057 insertShadowCheck(Operand, &
I);
5064 auto Size =
DL.getTypeStoreSize(ElemTy);
5066 if (MS.CompileKernel) {
5067 IRB.
CreateCall(MS.MsanInstrumentAsmStoreFn, {Operand, SizeVal});
5073 auto [ShadowPtr,
_] =
5074 getShadowOriginPtrUserspace(Operand, IRB, IRB.
getInt8Ty(),
Align(1));
5085 int NumRetOutputs = 0;
5087 Type *
RetTy = cast<Value>(CB)->getType();
5088 if (!
RetTy->isVoidTy()) {
5090 auto *
ST = dyn_cast<StructType>(
RetTy);
5092 NumRetOutputs =
ST->getNumElements();
5098 switch (
Info.Type) {
5106 return NumOutputs - NumRetOutputs;
5129 int OutputArgs = getNumOutputArgs(IA, CB);
5135 for (
int i = OutputArgs; i < NumOperands; i++) {
5143 for (
int i = 0; i < OutputArgs; i++) {
5149 setShadow(&
I, getCleanShadow(&
I));
5150 setOrigin(&
I, getCleanOrigin());
5155 setShadow(&
I, getCleanShadow(&
I));
5156 setOrigin(&
I, getCleanOrigin());
5164 for (
size_t i = 0, n =
I.getNumOperands(); i < n; i++) {
5165 Value *Operand =
I.getOperand(i);
5167 insertShadowCheck(Operand, &
I);
5169 setShadow(&
I, getCleanShadow(&
I));
5170 setOrigin(&
I, getCleanOrigin());
5174struct VarArgHelperBase :
public VarArgHelper {
5176 MemorySanitizer &MS;
5177 MemorySanitizerVisitor &MSV;
5179 const unsigned VAListTagSize;
5181 VarArgHelperBase(
Function &
F, MemorySanitizer &MS,
5182 MemorySanitizerVisitor &MSV,
unsigned VAListTagSize)
5183 :
F(
F), MS(MS), MSV(MSV), VAListTagSize(VAListTagSize) {}
5187 return IRB.
CreateAdd(
Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
5203 return getShadowPtrForVAArgument(IRB, ArgOffset);
5217 unsigned BaseOffset) {
5226 TailSize,
Align(8));
5231 Value *VAListTag =
I.getArgOperand(0);
5233 auto [ShadowPtr, OriginPtr] = MSV.getShadowOriginPtr(
5234 VAListTag, IRB, IRB.
getInt8Ty(), Alignment,
true);
5237 VAListTagSize, Alignment,
false);
5244 unpoisonVAListTagForInst(
I);
5250 unpoisonVAListTagForInst(
I);
5255struct VarArgAMD64Helper :
public VarArgHelperBase {
5258 static const unsigned AMD64GpEndOffset = 48;
5259 static const unsigned AMD64FpEndOffsetSSE = 176;
5261 static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
5263 unsigned AMD64FpEndOffset;
5266 Value *VAArgOverflowSize =
nullptr;
5268 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
5270 VarArgAMD64Helper(
Function &
F, MemorySanitizer &MS,
5271 MemorySanitizerVisitor &MSV)
5272 : VarArgHelperBase(
F, MS, MSV, 24) {
5273 AMD64FpEndOffset = AMD64FpEndOffsetSSE;
5274 for (
const auto &Attr :
F.getAttributes().getFnAttrs()) {
5275 if (Attr.isStringAttribute() &&
5276 (Attr.getKindAsString() ==
"target-features")) {
5277 if (Attr.getValueAsString().contains(
"-sse"))
5278 AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
5284 ArgKind classifyArgument(
Value *arg) {
5287 if (
T->isX86_FP80Ty())
5289 if (
T->isFPOrFPVectorTy())
5290 return AK_FloatingPoint;
5291 if (
T->isIntegerTy() &&
T->getPrimitiveSizeInBits() <= 64)
5292 return AK_GeneralPurpose;
5293 if (
T->isPointerTy())
5294 return AK_GeneralPurpose;
5307 unsigned GpOffset = 0;
5308 unsigned FpOffset = AMD64GpEndOffset;
5309 unsigned OverflowOffset = AMD64FpEndOffset;
5314 bool IsByVal = CB.
paramHasAttr(ArgNo, Attribute::ByVal);
5321 assert(
A->getType()->isPointerTy());
5323 uint64_t ArgSize =
DL.getTypeAllocSize(RealTy);
5325 unsigned BaseOffset = OverflowOffset;
5326 Value *ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
5327 Value *OriginBase =
nullptr;
5328 if (MS.TrackOrigins)
5329 OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
5330 OverflowOffset += AlignedSize;
5333 CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
5337 Value *ShadowPtr, *OriginPtr;
5338 std::tie(ShadowPtr, OriginPtr) =
5343 if (MS.TrackOrigins)
5347 ArgKind AK = classifyArgument(
A);
5348 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
5350 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
5352 Value *ShadowBase, *OriginBase =
nullptr;
5354 case AK_GeneralPurpose:
5355 ShadowBase = getShadowPtrForVAArgument(IRB, GpOffset);
5356 if (MS.TrackOrigins)
5357 OriginBase = getOriginPtrForVAArgument(IRB, GpOffset);
5361 case AK_FloatingPoint:
5362 ShadowBase = getShadowPtrForVAArgument(IRB, FpOffset);
5363 if (MS.TrackOrigins)
5364 OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
5371 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
5373 unsigned BaseOffset = OverflowOffset;
5374 ShadowBase = getShadowPtrForVAArgument(IRB, OverflowOffset);
5375 if (MS.TrackOrigins) {
5376 OriginBase = getOriginPtrForVAArgument(IRB, OverflowOffset);
5378 OverflowOffset += AlignedSize;
5381 CleanUnusedTLS(IRB, ShadowBase, BaseOffset);
5390 Value *Shadow = MSV.getShadow(
A);
5392 if (MS.TrackOrigins) {
5393 Value *Origin = MSV.getOrigin(
A);
5395 MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
5401 ConstantInt::get(IRB.
getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
5402 IRB.
CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
5405 void finalizeInstrumentation()
override {
5406 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
5407 "finalizeInstrumentation called twice");
5408 if (!VAStartInstrumentationList.
empty()) {
5415 ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize);
5422 Intrinsic::umin, CopySize,
5426 if (MS.TrackOrigins) {
5436 for (
CallInst *OrigInst : VAStartInstrumentationList) {
5437 NextNodeIRBuilder IRB(OrigInst);
5438 Value *VAListTag = OrigInst->getArgOperand(0);
5442 ConstantInt::get(MS.IntptrTy, 16)),
5445 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
5447 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
5448 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
5450 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
5452 if (MS.TrackOrigins)
5453 IRB.
CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
5454 Alignment, AMD64FpEndOffset);
5457 ConstantInt::get(MS.IntptrTy, 8)),
5459 Value *OverflowArgAreaPtr =
5460 IRB.
CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
5461 Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
5462 std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
5463 MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.
getInt8Ty(),
5467 IRB.
CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
5469 if (MS.TrackOrigins) {
5472 IRB.
CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
5480struct VarArgAArch64Helper :
public VarArgHelperBase {
5481 static const unsigned kAArch64GrArgSize = 64;
5482 static const unsigned kAArch64VrArgSize = 128;
5484 static const unsigned AArch64GrBegOffset = 0;
5485 static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
5487 static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
5488 static const unsigned AArch64VrEndOffset =
5489 AArch64VrBegOffset + kAArch64VrArgSize;
5490 static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
5493 Value *VAArgOverflowSize =
nullptr;
5495 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
5497 VarArgAArch64Helper(
Function &
F, MemorySanitizer &MS,
5498 MemorySanitizerVisitor &MSV)
5499 : VarArgHelperBase(
F, MS, MSV, 32) {}
5502 std::pair<ArgKind, uint64_t> classifyArgument(
Type *
T) {
5503 if (
T->isIntOrPtrTy() &&
T->getPrimitiveSizeInBits() <= 64)
5504 return {AK_GeneralPurpose, 1};
5505 if (
T->isFloatingPointTy() &&
T->getPrimitiveSizeInBits() <= 128)
5506 return {AK_FloatingPoint, 1};
5508 if (
T->isArrayTy()) {
5509 auto R = classifyArgument(
T->getArrayElementType());
5510 R.second *=
T->getScalarType()->getArrayNumElements();
5515 auto R = classifyArgument(FV->getScalarType());
5516 R.second *= FV->getNumElements();
5521 return {AK_Memory, 0};
5534 unsigned GrOffset = AArch64GrBegOffset;
5535 unsigned VrOffset = AArch64VrBegOffset;
5536 unsigned OverflowOffset = AArch64VAEndOffset;
5541 auto [AK, RegNum] = classifyArgument(
A->getType());
5542 if (AK == AK_GeneralPurpose &&
5543 (GrOffset + RegNum * 8) > AArch64GrEndOffset)
5545 if (AK == AK_FloatingPoint &&
5546 (VrOffset + RegNum * 16) > AArch64VrEndOffset)
5550 case AK_GeneralPurpose:
5551 Base = getShadowPtrForVAArgument(IRB, GrOffset);
5552 GrOffset += 8 * RegNum;
5554 case AK_FloatingPoint:
5555 Base = getShadowPtrForVAArgument(IRB, VrOffset);
5556 VrOffset += 16 * RegNum;
5563 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
5565 unsigned BaseOffset = OverflowOffset;
5566 Base = getShadowPtrForVAArgument(IRB, BaseOffset);
5567 OverflowOffset += AlignedSize;
5570 CleanUnusedTLS(IRB,
Base, BaseOffset);
5582 ConstantInt::get(IRB.
getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
5583 IRB.
CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
5590 ConstantInt::get(MS.IntptrTy, offset)),
5599 ConstantInt::get(MS.IntptrTy, offset)),
5602 return IRB.
CreateSExt(SaveArea32, MS.IntptrTy);
5605 void finalizeInstrumentation()
override {
5606 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
5607 "finalizeInstrumentation called twice");
5608 if (!VAStartInstrumentationList.empty()) {
5615 ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize);
5622 Intrinsic::umin, CopySize,
5628 Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
5629 Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
5633 for (
CallInst *OrigInst : VAStartInstrumentationList) {
5634 NextNodeIRBuilder IRB(OrigInst);
5636 Value *VAListTag = OrigInst->getArgOperand(0);
5653 Value *StackSaveAreaPtr =
5654 IRB.
CreateIntToPtr(getVAField64(IRB, VAListTag, 0), RegSaveAreaPtrTy);
5657 Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
5658 Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);
5661 IRB.
CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea), RegSaveAreaPtrTy);
5664 Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
5665 Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);
5668 IRB.
CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea), RegSaveAreaPtrTy);
5674 Value *GrRegSaveAreaShadowPtrOff =
5675 IRB.
CreateAdd(GrArgSize, GrOffSaveArea);
5677 Value *GrRegSaveAreaShadowPtr =
5678 MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
5684 Value *GrCopySize = IRB.
CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
5690 Value *VrRegSaveAreaShadowPtrOff =
5691 IRB.
CreateAdd(VrArgSize, VrOffSaveArea);
5693 Value *VrRegSaveAreaShadowPtr =
5694 MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
5701 VrRegSaveAreaShadowPtrOff);
5702 Value *VrCopySize = IRB.
CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);
5708 Value *StackSaveAreaShadowPtr =
5709 MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.
getInt8Ty(),
5714 VAArgTLSCopy, IRB.
getInt32(AArch64VAEndOffset));
5717 Align(16), VAArgOverflowSize);
5723struct VarArgPowerPCHelper :
public VarArgHelperBase {
5725 Value *VAArgSize =
nullptr;
5727 VarArgPowerPCHelper(
Function &
F, MemorySanitizer &MS,
5728 MemorySanitizerVisitor &MSV,
unsigned VAListTagSize)
5729 : VarArgHelperBase(
F, MS, MSV, VAListTagSize) {}
5739 Triple TargetTriple(
F.getParent()->getTargetTriple());
5743 if (TargetTriple.isPPC64()) {
5744 if (TargetTriple.isPPC64ELFv2ABI())
5752 unsigned VAArgOffset = VAArgBase;
5756 bool IsByVal = CB.
paramHasAttr(ArgNo, Attribute::ByVal);
5758 assert(
A->getType()->isPointerTy());
5760 uint64_t ArgSize =
DL.getTypeAllocSize(RealTy);
5763 ArgAlign =
Align(8);
5764 VAArgOffset =
alignTo(VAArgOffset, ArgAlign);
5767 getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
5769 Value *AShadowPtr, *AOriginPtr;
5770 std::tie(AShadowPtr, AOriginPtr) =
5771 MSV.getShadowOriginPtr(
A, IRB, IRB.
getInt8Ty(),
5781 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
5783 if (
A->getType()->isArrayTy()) {
5786 Type *ElementTy =
A->getType()->getArrayElementType();
5788 ArgAlign =
Align(
DL.getTypeAllocSize(ElementTy));
5789 }
else if (
A->getType()->isVectorTy()) {
5791 ArgAlign =
Align(ArgSize);
5794 ArgAlign =
Align(8);
5795 VAArgOffset =
alignTo(VAArgOffset, ArgAlign);
5796 if (
DL.isBigEndian()) {
5800 VAArgOffset += (8 - ArgSize);
5804 getShadowPtrForVAArgument(IRB, VAArgOffset - VAArgBase, ArgSize);
5808 VAArgOffset += ArgSize;
5812 VAArgBase = VAArgOffset;
5816 ConstantInt::get(MS.IntptrTy, VAArgOffset - VAArgBase);
5819 IRB.
CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
5822 void finalizeInstrumentation()
override {
5823 assert(!VAArgSize && !VAArgTLSCopy &&
5824 "finalizeInstrumentation called twice");
5827 Value *CopySize = VAArgSize;
5829 if (!VAStartInstrumentationList.empty()) {
5839 Intrinsic::umin, CopySize,
5847 Triple TargetTriple(
F.getParent()->getTargetTriple());
5848 for (
CallInst *OrigInst : VAStartInstrumentationList) {
5849 NextNodeIRBuilder IRB(OrigInst);
5850 Value *VAListTag = OrigInst->getArgOperand(0);
5854 if (!TargetTriple.isPPC64()) {
5856 IRB.
CreateAdd(RegSaveAreaPtrPtr, ConstantInt::get(MS.IntptrTy, 8));
5858 RegSaveAreaPtrPtr = IRB.
CreateIntToPtr(RegSaveAreaPtrPtr, MS.PtrTy);
5861 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
5863 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
5865 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
5866 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
5868 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
5875struct VarArgSystemZHelper :
public VarArgHelperBase {
5876 static const unsigned SystemZGpOffset = 16;
5877 static const unsigned SystemZGpEndOffset = 56;
5878 static const unsigned SystemZFpOffset = 128;
5879 static const unsigned SystemZFpEndOffset = 160;
5880 static const unsigned SystemZMaxVrArgs = 8;
5881 static const unsigned SystemZRegSaveAreaSize = 160;
5882 static const unsigned SystemZOverflowOffset = 160;
5883 static const unsigned SystemZVAListTagSize = 32;
5884 static const unsigned SystemZOverflowArgAreaPtrOffset = 16;
5885 static const unsigned SystemZRegSaveAreaPtrOffset = 24;
5887 bool IsSoftFloatABI;
5890 Value *VAArgOverflowSize =
nullptr;
5892 enum class ArgKind {
5900 enum class ShadowExtension {
None,
Zero, Sign };
5902 VarArgSystemZHelper(
Function &
F, MemorySanitizer &MS,
5903 MemorySanitizerVisitor &MSV)
5904 : VarArgHelperBase(
F, MS, MSV, SystemZVAListTagSize),
5905 IsSoftFloatABI(
F.getFnAttribute(
"use-soft-float").getValueAsBool()) {}
5907 ArgKind classifyArgument(
Type *
T) {
5914 if (
T->isIntegerTy(128) ||
T->isFP128Ty())
5915 return ArgKind::Indirect;
5916 if (
T->isFloatingPointTy())
5917 return IsSoftFloatABI ? ArgKind::GeneralPurpose : ArgKind::FloatingPoint;
5918 if (
T->isIntegerTy() ||
T->isPointerTy())
5919 return ArgKind::GeneralPurpose;
5920 if (
T->isVectorTy())
5921 return ArgKind::Vector;
5922 return ArgKind::Memory;
5925 ShadowExtension getShadowExtension(
const CallBase &CB,
unsigned ArgNo) {
5935 return ShadowExtension::Zero;
5939 return ShadowExtension::Sign;
5941 return ShadowExtension::None;
5945 unsigned GpOffset = SystemZGpOffset;
5946 unsigned FpOffset = SystemZFpOffset;
5947 unsigned VrIndex = 0;
5948 unsigned OverflowOffset = SystemZOverflowOffset;
5955 ArgKind AK = classifyArgument(
T);
5956 if (AK == ArgKind::Indirect) {
5958 AK = ArgKind::GeneralPurpose;
5960 if (AK == ArgKind::GeneralPurpose && GpOffset >= SystemZGpEndOffset)
5961 AK = ArgKind::Memory;
5962 if (AK == ArgKind::FloatingPoint && FpOffset >= SystemZFpEndOffset)
5963 AK = ArgKind::Memory;
5964 if (AK == ArgKind::Vector && (VrIndex >= SystemZMaxVrArgs || !IsFixed))
5965 AK = ArgKind::Memory;
5966 Value *ShadowBase =
nullptr;
5967 Value *OriginBase =
nullptr;
5968 ShadowExtension SE = ShadowExtension::None;
5970 case ArgKind::GeneralPurpose: {
5975 SE = getShadowExtension(CB, ArgNo);
5977 if (SE == ShadowExtension::None) {
5979 assert(ArgAllocSize <= ArgSize);
5980 GapSize = ArgSize - ArgAllocSize;
5982 ShadowBase = getShadowAddrForVAArgument(IRB, GpOffset + GapSize);
5983 if (MS.TrackOrigins)
5984 OriginBase = getOriginPtrForVAArgument(IRB, GpOffset + GapSize);
5986 GpOffset += ArgSize;
5992 case ArgKind::FloatingPoint: {
6001 ShadowBase = getShadowAddrForVAArgument(IRB, FpOffset);
6002 if (MS.TrackOrigins)
6003 OriginBase = getOriginPtrForVAArgument(IRB, FpOffset);
6005 FpOffset += ArgSize;
6011 case ArgKind::Vector: {
6018 case ArgKind::Memory: {
6026 SE = getShadowExtension(CB, ArgNo);
6028 SE == ShadowExtension::None ? ArgSize - ArgAllocSize : 0;
6030 getShadowAddrForVAArgument(IRB, OverflowOffset + GapSize);
6031 if (MS.TrackOrigins)
6033 getOriginPtrForVAArgument(IRB, OverflowOffset + GapSize);
6034 OverflowOffset += ArgSize;
6041 case ArgKind::Indirect:
6044 if (ShadowBase ==
nullptr)
6046 Value *Shadow = MSV.getShadow(
A);
6047 if (SE != ShadowExtension::None)
6048 Shadow = MSV.CreateShadowCast(IRB, Shadow, IRB.
getInt64Ty(),
6049 SE == ShadowExtension::Sign);
6050 ShadowBase = IRB.
CreateIntToPtr(ShadowBase, MS.PtrTy,
"_msarg_va_s");
6052 if (MS.TrackOrigins) {
6053 Value *Origin = MSV.getOrigin(
A);
6055 MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
6059 Constant *OverflowSize = ConstantInt::get(
6060 IRB.
getInt64Ty(), OverflowOffset - SystemZOverflowOffset);
6061 IRB.
CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
6068 ConstantInt::get(MS.IntptrTy, SystemZRegSaveAreaPtrOffset)),
6071 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
6073 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
6074 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(), Alignment,
6079 unsigned RegSaveAreaSize =
6080 IsSoftFloatABI ? SystemZGpEndOffset : SystemZRegSaveAreaSize;
6081 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
6083 if (MS.TrackOrigins)
6084 IRB.
CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
6085 Alignment, RegSaveAreaSize);
6094 ConstantInt::get(MS.IntptrTy, SystemZOverflowArgAreaPtrOffset)),
6096 Value *OverflowArgAreaPtr = IRB.
CreateLoad(MS.PtrTy, OverflowArgAreaPtrPtr);
6097 Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
6099 std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
6100 MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.
getInt8Ty(),
6103 SystemZOverflowOffset);
6104 IRB.
CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
6106 if (MS.TrackOrigins) {
6108 SystemZOverflowOffset);
6109 IRB.
CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
6114 void finalizeInstrumentation()
override {
6115 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
6116 "finalizeInstrumentation called twice");
6117 if (!VAStartInstrumentationList.empty()) {
6124 IRB.
CreateAdd(ConstantInt::get(MS.IntptrTy, SystemZOverflowOffset),
6132 Intrinsic::umin, CopySize,
6136 if (MS.TrackOrigins) {
6146 for (
CallInst *OrigInst : VAStartInstrumentationList) {
6147 NextNodeIRBuilder IRB(OrigInst);
6148 Value *VAListTag = OrigInst->getArgOperand(0);
6149 copyRegSaveArea(IRB, VAListTag);
6150 copyOverflowArea(IRB, VAListTag);
6156struct VarArgI386Helper :
public VarArgHelperBase {
6158 Value *VAArgSize =
nullptr;
6160 VarArgI386Helper(
Function &
F, MemorySanitizer &MS,
6161 MemorySanitizerVisitor &MSV)
6162 : VarArgHelperBase(
F, MS, MSV, 4) {}
6166 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
6167 unsigned VAArgOffset = 0;
6170 bool IsByVal = CB.
paramHasAttr(ArgNo, Attribute::ByVal);
6172 assert(
A->getType()->isPointerTy());
6174 uint64_t ArgSize =
DL.getTypeAllocSize(RealTy);
6176 if (ArgAlign < IntptrSize)
6177 ArgAlign =
Align(IntptrSize);
6178 VAArgOffset =
alignTo(VAArgOffset, ArgAlign);
6180 Value *
Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
6182 Value *AShadowPtr, *AOriginPtr;
6183 std::tie(AShadowPtr, AOriginPtr) =
6184 MSV.getShadowOriginPtr(
A, IRB, IRB.
getInt8Ty(),
6194 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
6196 VAArgOffset =
alignTo(VAArgOffset, ArgAlign);
6197 if (
DL.isBigEndian()) {
6200 if (ArgSize < IntptrSize)
6201 VAArgOffset += (IntptrSize - ArgSize);
6204 Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
6207 VAArgOffset += ArgSize;
6213 Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
6216 IRB.
CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
6219 void finalizeInstrumentation()
override {
6220 assert(!VAArgSize && !VAArgTLSCopy &&
6221 "finalizeInstrumentation called twice");
6224 Value *CopySize = VAArgSize;
6226 if (!VAStartInstrumentationList.empty()) {
6235 Intrinsic::umin, CopySize,
6243 for (
CallInst *OrigInst : VAStartInstrumentationList) {
6244 NextNodeIRBuilder IRB(OrigInst);
6245 Value *VAListTag = OrigInst->getArgOperand(0);
6246 Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
6247 Value *RegSaveAreaPtrPtr =
6249 PointerType::get(*MS.C, 0));
6250 Value *RegSaveAreaPtr =
6251 IRB.
CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
6252 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
6254 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
6256 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
6257 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
6259 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
6267struct VarArgGenericHelper :
public VarArgHelperBase {
6269 Value *VAArgSize =
nullptr;
6271 VarArgGenericHelper(
Function &
F, MemorySanitizer &MS,
6272 MemorySanitizerVisitor &MSV,
const unsigned VAListTagSize)
6273 : VarArgHelperBase(
F, MS, MSV, VAListTagSize) {}
6276 unsigned VAArgOffset = 0;
6278 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
6283 uint64_t ArgSize =
DL.getTypeAllocSize(
A->getType());
6284 if (
DL.isBigEndian()) {
6287 if (ArgSize < IntptrSize)
6288 VAArgOffset += (IntptrSize - ArgSize);
6290 Value *
Base = getShadowPtrForVAArgument(IRB, VAArgOffset, ArgSize);
6291 VAArgOffset += ArgSize;
6292 VAArgOffset =
alignTo(VAArgOffset, IntptrSize);
6298 Constant *TotalVAArgSize = ConstantInt::get(MS.IntptrTy, VAArgOffset);
6301 IRB.
CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
6304 void finalizeInstrumentation()
override {
6305 assert(!VAArgSize && !VAArgTLSCopy &&
6306 "finalizeInstrumentation called twice");
6309 Value *CopySize = VAArgSize;
6311 if (!VAStartInstrumentationList.empty()) {
6320 Intrinsic::umin, CopySize,
6328 for (
CallInst *OrigInst : VAStartInstrumentationList) {
6329 NextNodeIRBuilder IRB(OrigInst);
6330 Value *VAListTag = OrigInst->getArgOperand(0);
6331 Type *RegSaveAreaPtrTy = PointerType::getUnqual(*MS.C);
6332 Value *RegSaveAreaPtrPtr =
6334 PointerType::get(*MS.C, 0));
6335 Value *RegSaveAreaPtr =
6336 IRB.
CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
6337 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
6339 unsigned IntptrSize =
DL.getTypeStoreSize(MS.IntptrTy);
6341 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
6342 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.
getInt8Ty(),
6344 IRB.
CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
6352using VarArgARM32Helper = VarArgGenericHelper;
6353using VarArgRISCVHelper = VarArgGenericHelper;
6354using VarArgMIPSHelper = VarArgGenericHelper;
6355using VarArgLoongArch64Helper = VarArgGenericHelper;
6358struct VarArgNoOpHelper :
public VarArgHelper {
6359 VarArgNoOpHelper(
Function &
F, MemorySanitizer &MS,
6360 MemorySanitizerVisitor &MSV) {}
6368 void finalizeInstrumentation()
override {}
6374 MemorySanitizerVisitor &Visitor) {
6377 Triple TargetTriple(Func.getParent()->getTargetTriple());
6380 return new VarArgI386Helper(Func, Msan, Visitor);
6383 return new VarArgAMD64Helper(Func, Msan, Visitor);
6385 if (TargetTriple.
isARM())
6386 return new VarArgARM32Helper(Func, Msan, Visitor, 4);
6389 return new VarArgAArch64Helper(Func, Msan, Visitor);
6392 return new VarArgSystemZHelper(Func, Msan, Visitor);
6397 return new VarArgPowerPCHelper(Func, Msan, Visitor, 12);
6400 return new VarArgPowerPCHelper(Func, Msan, Visitor, 8);
6403 return new VarArgRISCVHelper(Func, Msan, Visitor, 4);
6406 return new VarArgRISCVHelper(Func, Msan, Visitor, 8);
6409 return new VarArgMIPSHelper(Func, Msan, Visitor, 4);
6412 return new VarArgMIPSHelper(Func, Msan, Visitor, 8);
6415 return new VarArgLoongArch64Helper(Func, Msan, Visitor,
6418 return new VarArgNoOpHelper(Func, Msan, Visitor);
6425 if (
F.hasFnAttribute(Attribute::DisableSanitizerInstrumentation))
6428 MemorySanitizerVisitor Visitor(
F, *
this, TLI);
6432 B.addAttribute(Attribute::Memory).addAttribute(Attribute::Speculatable);
6435 return Visitor.runOnFunction();
This file implements a class to represent arbitrary precision integral constant values and operations...
static bool isStore(int Opcode)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static cl::opt< ITMode > IT(cl::desc("IT block support"), cl::Hidden, cl::init(DefaultIT), cl::values(clEnumValN(DefaultIT, "arm-default-it", "Generate any type of IT block"), clEnumValN(RestrictedIT, "arm-restrict-it", "Disallow complex IT blocks")))
static const size_t kNumberOfAccessSizes
VarLocInsertPt getNextNode(const DbgRecord *DVR)
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static AtomicOrdering addReleaseOrdering(AtomicOrdering AO)
static AtomicOrdering addAcquireOrdering(AtomicOrdering AO)
static bool isAMustTailRetVal(Value *RetVal)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
static bool runOnFunction(Function &F, bool PostInlining)
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This is the interface for a simple mod/ref and alias analysis over globals.
static size_t TypeSizeToSizeIndex(uint32_t TypeSize)
Module.h This file contains the declarations for the Module class.
static const MemoryMapParams Linux_LoongArch64_MemoryMapParams
static const PlatformMemoryMapParams Linux_S390_MemoryMapParams
static const Align kMinOriginAlignment
static const MemoryMapParams Linux_X86_64_MemoryMapParams
static cl::opt< uint64_t > ClShadowBase("msan-shadow-base", cl::desc("Define custom MSan ShadowBase"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClDumpStrictInstructions("msan-dump-strict-instructions", cl::desc("print out instructions with default strict semantics"), cl::Hidden, cl::init(false))
static const PlatformMemoryMapParams Linux_X86_MemoryMapParams
static cl::opt< uint64_t > ClOriginBase("msan-origin-base", cl::desc("Define custom MSan OriginBase"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClCheckConstantShadow("msan-check-constant-shadow", cl::desc("Insert checks for constant shadow values"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams Linux_LoongArch_MemoryMapParams
static const MemoryMapParams NetBSD_X86_64_MemoryMapParams
static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams
static const unsigned kOriginSize
static cl::opt< bool > ClWithComdat("msan-with-comdat", cl::desc("Place MSan constructors in comdat sections"), cl::Hidden, cl::init(false))
static cl::opt< int > ClTrackOrigins("msan-track-origins", cl::desc("Track origins (allocation sites) of poisoned memory"), cl::Hidden, cl::init(0))
Track origins of uninitialized values.
static cl::opt< int > ClInstrumentationWithCallThreshold("msan-instrumentation-with-call-threshold", cl::desc("If the function being instrumented requires more than " "this number of checks and origin stores, use callbacks instead of " "inline checks (-1 means never use callbacks)."), cl::Hidden, cl::init(3500))
static cl::opt< int > ClPoisonStackPattern("msan-poison-stack-pattern", cl::desc("poison uninitialized stack variables with the given pattern"), cl::Hidden, cl::init(0xff))
static const Align kShadowTLSAlignment
static cl::opt< bool > ClHandleICmpExact("msan-handle-icmp-exact", cl::desc("exact handling of relational integer ICmp"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams
static Constant * getOrInsertGlobal(Module &M, StringRef Name, Type *Ty)
static cl::opt< bool > ClDumpStrictIntrinsics("msan-dump-strict-intrinsics", cl::desc("Prints 'unknown' intrinsics that were handled heuristically. " "Use -msan-dump-strict-instructions to print intrinsics that " "could not be handled exactly nor heuristically."), cl::Hidden, cl::init(false))
static const MemoryMapParams Linux_S390X_MemoryMapParams
static cl::opt< bool > ClPoisonUndef("msan-poison-undef", cl::desc("poison undef temps"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClPoisonStack("msan-poison-stack", cl::desc("poison uninitialized stack variables"), cl::Hidden, cl::init(true))
static const MemoryMapParams Linux_I386_MemoryMapParams
const char kMsanInitName[]
static cl::opt< bool > ClPrintStackNames("msan-print-stack-names", cl::desc("Print name of local stack variable"), cl::Hidden, cl::init(true))
static const MemoryMapParams Linux_AArch64_MemoryMapParams
static cl::opt< uint64_t > ClAndMask("msan-and-mask", cl::desc("Define custom MSan AndMask"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClHandleLifetimeIntrinsics("msan-handle-lifetime-intrinsics", cl::desc("when possible, poison scoped variables at the beginning of the scope " "(slower, but more precise)"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClKeepGoing("msan-keep-going", cl::desc("keep going after reporting a UMR"), cl::Hidden, cl::init(false))
static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams
static GlobalVariable * createPrivateConstGlobalForString(Module &M, StringRef Str)
Create a non-const global initialized with the given string.
static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams
static const size_t kNumberOfAccessSizes
static cl::opt< bool > ClEagerChecks("msan-eager-checks", cl::desc("check arguments and return values at function call boundaries"), cl::Hidden, cl::init(false))
static cl::opt< int > ClDisambiguateWarning("msan-disambiguate-warning-threshold", cl::desc("Define threshold for number of checks per " "debug location to force origin update."), cl::Hidden, cl::init(3))
static VarArgHelper * CreateVarArgHelper(Function &Func, MemorySanitizer &Msan, MemorySanitizerVisitor &Visitor)
static const MemoryMapParams Linux_MIPS64_MemoryMapParams
static const MemoryMapParams Linux_PowerPC64_MemoryMapParams
static cl::opt< uint64_t > ClXorMask("msan-xor-mask", cl::desc("Define custom MSan XorMask"), cl::Hidden, cl::init(0))
static cl::opt< bool > ClHandleAsmConservative("msan-handle-asm-conservative", cl::desc("conservative handling of inline assembly"), cl::Hidden, cl::init(true))
static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams
static const PlatformMemoryMapParams FreeBSD_ARM_MemoryMapParams
static const unsigned kParamTLSSize
static cl::opt< bool > ClHandleICmp("msan-handle-icmp", cl::desc("propagate shadow through ICmpEQ and ICmpNE"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClEnableKmsan("msan-kernel", cl::desc("Enable KernelMemorySanitizer instrumentation"), cl::Hidden, cl::init(false))
static cl::opt< bool > ClPoisonStackWithCall("msan-poison-stack-with-call", cl::desc("poison uninitialized stack variables with a call"), cl::Hidden, cl::init(false))
static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams
static const unsigned kRetvalTLSSize
static const MemoryMapParams FreeBSD_AArch64_MemoryMapParams
const char kMsanModuleCtorName[]
static const MemoryMapParams FreeBSD_I386_MemoryMapParams
static cl::opt< bool > ClCheckAccessAddress("msan-check-access-address", cl::desc("report accesses through a pointer which has poisoned shadow"), cl::Hidden, cl::init(true))
static cl::opt< bool > ClDisableChecks("msan-disable-checks", cl::desc("Apply no_sanitize to the whole file"), cl::Hidden, cl::init(false))
FunctionAnalysisManager FAM
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
void visit(MachineFunction &MF, MachineBasicBlock &Start, std::function< void(MachineBasicBlock *)> op)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Class for arbitrary precision integers.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
an instruction to allocate memory on the stack
void setAlignment(Align Align)
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
LLVM Basic Block Representation.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
InstListType::iterator iterator
Instruction iterators...
This class represents a no-op cast from one type to another.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
MaybeAlign getParamAlign(unsigned ArgNo) const
Extract the alignment for a call or parameter (0=unknown).
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Value * getCalledOperand() const
Type * getParamElementType(unsigned ArgNo) const
Extract the elementtype type for a parameter.
Value * getArgOperand(unsigned i) const
void setArgOperand(unsigned i, Value *v)
FunctionType * getFunctionType() const
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_SGT
signed greater than
@ ICMP_SGE
signed greater or equal
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static Constant * getString(LLVMContext &Context, StringRef Initializer, bool AddNull=true)
This method constructs a CDS and initializes it with a text string.
static Constant * get(LLVMContext &Context, ArrayRef< uint8_t > Elts)
get() constructors - Return a constant with vector type with an element count and element type matchi...
This is the shared class of boolean and integer constants.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static ConstantInt * getBool(LLVMContext &Context, bool V)
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isZeroValue() const
Return true if the value is negative zero or null value.
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
static bool shouldExecute(unsigned CounterName)
This instruction compares its operands according to the predicate given to the constructor.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
void setComdat(Comdat *C)
@ PrivateLinkage
Like Internal, but omit from symbol table.
@ ExternalLinkage
Externally visible function.
Analysis pass providing a never-invalidated alias analysis result.
This instruction compares its operands according to the predicate given to the constructor.
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Value * CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
CallInst * CreateMaskedCompressStore(Value *Val, Value *Ptr, MaybeAlign Align, Value *Mask=nullptr)
Create a call to Masked Compress Store intrinsic.
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
LoadInst * CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, const char *Name)
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
CallInst * CreateAndReduce(Value *Src)
Create a vector int AND reduction intrinsic of the source vector.
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memset to the specified pointer and the specified value.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
BasicBlock::iterator GetInsertPoint() const
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateTypeSize(Type *DstType, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
ConstantInt * getInt8(uint8_t C)
Get a constant 8-bit value.
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Value * CreateUDiv(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
DebugLoc getCurrentDebugLocation() const
Get location information used by debugging information.
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
ConstantInt * getIntN(unsigned N, uint64_t C)
Get a constant N-bit value, zero extended or truncated from a 64-bit value.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
LLVMContext & getContext() const
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to Masked Store intrinsic.
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateIsNotNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg != 0.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Value * CreateIsNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg == 0.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Type * getVoidTy()
Fetch the type representing void.
StoreInst * CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, bool isVolatile=false)
CallInst * CreateMaskedExpandLoad(Type *Ty, Value *Ptr, MaybeAlign Align, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Expand Load intrinsic.
Value * CreateInBoundsPtrAdd(Value *Ptr, Value *Offset, const Twine &Name="")
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
CallInst * CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, Value *Mask=nullptr)
Create a call to Masked Scatter intrinsic.
CallInst * CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Gather intrinsic.
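A sketch of the masked memory helpers, assuming VecTy is a <4 x i32> type, Ptrs a <4 x ptr> vector of addresses, Mask a <4 x i1>, PassThru a <4 x i32> fallback, and ScalarPtr a hypothetical single pointer:
  Value *Ld = IRB.CreateMaskedGather(VecTy, Ptrs, Align(4), Mask, PassThru);
  IRB.CreateMaskedScatter(Ld, Ptrs, Align(4), Mask);
  // The contiguous variant takes one pointer instead of a pointer vector:
  IRB.CreateMaskedStore(Ld, ScalarPtr, Align(4), Mask);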
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
std::vector< ConstraintInfo > ConstraintInfoVector
An analysis over an "outer" IR unit that provides access to an analysis manager over an "inner" IR unit.
This instruction inserts a single (scalar) element into a VectorType value.
This instruction inserts a struct field or array element value into an aggregate value.
Base class for instruction visitors.
void visit(Iterator Start, Iterator End)
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
This class represents a cast from an integer to a pointer.
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
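For example, a width not covered by the convenience getters:
  IntegerType *I128 = IntegerType::get(Ctx, 128); // an i128 type in context Ctx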
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
The landingpad instruction holds all of the information necessary to generate correct exception handling.
An instruction for reading from memory.
MDNode * createUnlikelyBranchWeights()
Return metadata containing two branch weights, with significant bias towards false destination.
This class wraps the llvm.memcpy intrinsic.
This class wraps the llvm.memmove intrinsic.
This class wraps the llvm.memset and llvm.memset.inline intrinsics.
A Module instance is used to store all the information related to an LLVM module.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
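A sketch of PHI construction, assuming two predecessor blocks ThenBB/ElseBB carrying i32 values ThenV/ElseV:
  PHINode *Phi = IRB.CreatePHI(IRB.getInt32Ty(), /*NumReservedValues=*/2);
  Phi->addIncoming(ThenV, ThenBB);
  Phi->addIncoming(ElseV, ElseBB);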
In order to facilitate speculative execution, many instructions do not invoke immediate undefined behavior when operating on undefined or poison values, but return poison instead.
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void abandon()
Mark an analysis as abandoned.
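The usual pattern in a pass's run method, where instrument(M) is a hypothetical helper returning true iff the IR changed:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM) {
    if (!instrument(M))
      return PreservedAnalyses::all();  // nothing changed, keep all analyses
    return PreservedAnalyses::none();   // conservatively invalidate everything
  }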
This class represents a cast from a pointer to an integer.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
bool remove(const value_type &X)
Remove an item from the set vector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
This instruction constructs a fixed permutation of two input vectors.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Class to represent struct types.
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
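For instance, a literal two-pointer struct like the { shadow, origin } pair this instrumentation's metadata callbacks return; PtrTy stands in for a pointer type obtained elsewhere:
  StructType *MsanMetadataTy = StructType::get(Ctx, {PtrTy, PtrTy});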
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
AttributeList getAttrList(LLVMContext *C, ArrayRef< unsigned > ArgNos, bool Signed, bool Ret=false, AttributeList AL=AttributeList()) const
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
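A sketch of the lookup, assuming TLI is a TargetLibraryInfo reference and Callee a named function:
  LibFunc LF;
  if (TLI.getLibFunc(Callee->getName(), LF) && TLI.has(LF)) {
    // Callee is a recognized library function available on this target.
  }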
Triple - Helper class for working with autoconf configuration names.
bool isMIPS64() const
Tests whether the target is MIPS 64-bit (little and big endian).
bool isRISCV32() const
Tests whether the target is 32-bit RISC-V.
bool isPPC32() const
Tests whether the target is 32-bit PowerPC (little and big endian).
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isRISCV64() const
Tests whether the target is 64-bit RISC-V.
bool isLoongArch64() const
Tests whether the target is 64-bit LoongArch.
bool isMIPS32() const
Tests whether the target is MIPS 32-bit (little and big endian).
bool isARM() const
Tests whether the target is ARM (little and big endian).
bool isPPC64() const
Tests whether the target is 64-bit PowerPC (little and big endian).
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
bool isSystemZ() const
Tests whether the target is SystemZ.
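These predicates drive per-target choices (shadow mappings, varargs helpers) in code like this file; a sketch assuming a Module M:
  Triple T(M.getTargetTriple());
  bool Is64Bit = T.isAArch64() || T.isPPC64() || T.isMIPS64() ||
                 T.isRISCV64() || T.isLoongArch64() || T.isSystemZ();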
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
'undef' values are things that do not have specified contents.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This represents the llvm.va_copy intrinsic.
This represents the llvm.va_start intrinsic.
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void setName(const Twine &Name)
Change the name of the value.
StringRef getName() const
Return a constant reference to the value's name.
Type * getElementType() const
This class represents zero extension of integer types.
int getNumOccurrences() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
This class provides various memory handling functions that manipulate MemoryBlock instances.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ C
The default llvm calling convention, compatible with C.
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
initializer< Ty > init(const Ty &Val)
Function * Kernel
Summary of a kernel (=entry point for target offloading).
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
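Two data points, including the zero case called out above:
  unsigned A = Log2_32_Ceil(80); // 7, the smallest n with (1u << n) >= 80
  unsigned B = Log2_32_Ceil(0);  // 32 by definition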
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), where A is the 0-based index of the item in the sequence, and B, C, ... are the values from the original input ranges.
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns the unique alloca the value comes from, or nullptr.
std::pair< Function *, FunctionCallee > getOrCreateSanitizerCtorAndInitFunctions(Module &M, StringRef CtorName, StringRef InitName, ArrayRef< Type * > InitArgTypes, ArrayRef< Value * > InitArgs, function_ref< void(Function *, FunctionCallee)> FunctionsCreatedCallback, StringRef VersionCheckName=StringRef(), bool Weak=false)
Creates sanitizer constructor function lazily.
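A sketch of the typical wiring, assuming the ctor/init names used by this instrumentation ("msan.module_ctor" and "__msan_init"); the callback registers the ctor once it exists:
  getOrCreateSanitizerCtorAndInitFunctions(
      M, "msan.module_ctor", "__msan_init",
      /*InitArgTypes=*/{}, /*InitArgs=*/{},
      [&](Function *Ctor, FunctionCallee) {
        appendToGlobalCtors(M, Ctor, /*Priority=*/0); // run early at startup
      });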
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Or
Bitwise or logical OR of integers.
std::pair< Instruction *, Value * > SplitBlockAndInsertSimpleForLoop(Value *End, Instruction *SplitBefore)
Insert a for (int i = 0; i < End; i++) loop structure (with the exception that End is assumed > 0, and thus is not checked on entry) at SplitBefore.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
constexpr unsigned BitWidth
void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
iterator_range< df_iterator< T > > depth_first(const T &G)
Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the old basic block, and the rest of the instructions in the BB are moved to a new block; the two blocks are connected by a conditional branch on Cond.
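A sketch of the shadow-check shape this file emits, with Cmp a hypothetical i1 "is poisoned" value, IRB a builder at the check site, and WarningFn a hypothetical __msan_warning-style callee:
  Instruction *ThenTerm = SplitBlockAndInsertIfThen(
      Cmp, IRB.GetInsertPoint(), /*Unreachable=*/false,
      MDBuilder(Ctx).createUnlikelyBranchWeights()); // bias toward the clean path
  IRBuilder<> ThenIRB(ThenTerm);
  ThenIRB.CreateCall(WarningFn, {});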
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI, const TargetLibraryInfo *TLI)
Given a CallInst, check if it calls a string function known to CodeGen, and mark it with NoBuiltin if so.
bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that cannot be reached from the function's entry.
bool checkIfAlreadyInstrumented(Module &M, StringRef Flag)
Check if the module has the flag attached; if not, add the flag.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
A CRTP mix-in to automatically provide informational APIs needed for passes.